Message ID | 20180305160415.16760-38-andre.przywara@linaro.org
State | Superseded
Series | New VGIC(-v2) implementation
Hi Andre, On 03/05/2018 04:03 PM, Andre Przywara wrote: > As the enable register handlers are shared between the v2 and v3 > emulation, their implementation goes into vgic-mmio.c, to be easily > referenced from the v3 emulation as well later. > This introduces a vgic_sync_hardware_irq() function, which updates the > physical side of a hardware mapped virtual IRQ. > Because the existing locking order between vgic_irq->irq_lock and > irq_desc->lock dictates so, we dropu the irq_lock and retake them in the > proper order. > > Signed-off-by: Andre Przywara <andre.przywara@linaro.org> > --- > Changelog RFC ... v1: > - extend and move vgic_sync_hardware_irq() > - do proper locking sequence > - skip already disabled/enabled IRQs > > xen/arch/arm/vgic/vgic-mmio-v2.c | 4 +- > xen/arch/arm/vgic/vgic-mmio.c | 117 +++++++++++++++++++++++++++++++++++++++ > xen/arch/arm/vgic/vgic-mmio.h | 11 ++++ > xen/arch/arm/vgic/vgic.c | 38 +++++++++++++ > xen/arch/arm/vgic/vgic.h | 3 + > 5 files changed, 171 insertions(+), 2 deletions(-) > > diff --git a/xen/arch/arm/vgic/vgic-mmio-v2.c b/xen/arch/arm/vgic/vgic-mmio-v2.c > index 2e015ed0b1..3dd983f885 100644 > --- a/xen/arch/arm/vgic/vgic-mmio-v2.c > +++ b/xen/arch/arm/vgic/vgic-mmio-v2.c > @@ -80,10 +80,10 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = { > vgic_mmio_read_rao, vgic_mmio_write_wi, 1, > VGIC_ACCESS_32bit), > REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISENABLER, > - vgic_mmio_read_raz, vgic_mmio_write_wi, 1, > + vgic_mmio_read_enable, vgic_mmio_write_senable, 1, > VGIC_ACCESS_32bit), > REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICENABLER, > - vgic_mmio_read_raz, vgic_mmio_write_wi, 1, > + vgic_mmio_read_enable, vgic_mmio_write_cenable, 1, > VGIC_ACCESS_32bit), > REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISPENDR, > vgic_mmio_read_raz, vgic_mmio_write_wi, 1, > diff --git a/xen/arch/arm/vgic/vgic-mmio.c b/xen/arch/arm/vgic/vgic-mmio.c > index 284a92d288..f8f0252eff 100644 > --- a/xen/arch/arm/vgic/vgic-mmio.c > +++ b/xen/arch/arm/vgic/vgic-mmio.c > @@ -39,6 +39,123 @@ void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr, > /* Ignore */ > } > > +/* > + * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value > + * of the enabled bit, so there is only one function for both here. > + */ > +unsigned long vgic_mmio_read_enable(struct vcpu *vcpu, > + paddr_t addr, unsigned int len) > +{ > + uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); > + uint32_t value = 0; > + unsigned int i; > + > + /* Loop over all IRQs affected by this read */ > + for ( i = 0; i < len * 8; i++ ) > + { > + struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i); > + > + if ( irq->enabled ) > + value |= (1U << i); > + > + vgic_put_irq(vcpu->domain, irq); > + } > + > + return value; > +} > + > +void vgic_mmio_write_senable(struct vcpu *vcpu, > + paddr_t addr, unsigned int len, > + unsigned long val) > +{ > + uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); > + unsigned int i; > + > + for_each_set_bit( i, &val, len * 8 ) > + { > + struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i); > + unsigned long flags; > + irq_desc_t *desc; > + > + spin_lock_irqsave(&irq->irq_lock, flags); > + > + if ( irq->enabled ) /* skip already enabled IRQs */ > + { > + spin_unlock_irqrestore(&irq->irq_lock, flags); > + vgic_put_irq(vcpu->domain, irq); > + continue; > + } > + > + irq->enabled = true; > + if ( irq->hw ) > + { > + /* > + * The irq cannot be a PPI, we only support delivery > + * of SPIs to guests. 
> + */ > + ASSERT(irq->hwintid >= VGIC_NR_PRIVATE_IRQS); > + > + desc = irq_to_desc(irq->hwintid); > + } > + else > + desc = NULL; You could just initialize desc to NULL at the declaration time and drop the else part. > + > + vgic_queue_irq_unlock(vcpu->domain, irq, flags); > + > + if ( desc ) > + vgic_sync_hardware_irq(vcpu->domain, desc, irq); A comment explaining why desc is done outside the locking would be useful. This would avoid to loose time using git blame. > + > + vgic_put_irq(vcpu->domain, irq); > + } > +} > + > +void vgic_mmio_write_cenable(struct vcpu *vcpu, > + paddr_t addr, unsigned int len, > + unsigned long val) > +{ > + uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); > + unsigned int i; > + > + for_each_set_bit( i, &val, len * 8 ) > + { > + struct vgic_irq *irq; > + unsigned long flags; > + irq_desc_t *desc; > + > + irq = vgic_get_irq(vcpu->domain, vcpu, intid + i); > + spin_lock_irqsave(&irq->irq_lock, flags); > + > + if ( !irq->enabled ) /* skip already disabled IRQs */ > + { > + spin_unlock_irqrestore(&irq->irq_lock, flags); > + vgic_put_irq(vcpu->domain, irq); > + continue; > + } > + > + irq->enabled = false; > + > + if ( irq->hw ) > + { > + /* > + * The irq cannot be a PPI, we only support delivery > + * of SPIs to guests. > + */ > + ASSERT(irq->hwintid >= VGIC_NR_PRIVATE_IRQS); > + > + desc = irq_to_desc(irq->hwintid); > + } > + else > + desc = NULL; > + > + spin_unlock_irqrestore(&irq->irq_lock, flags); > + > + if ( desc ) > + vgic_sync_hardware_irq(vcpu->domain, desc, irq); Ditto. > + > + vgic_put_irq(vcpu->domain, irq); > + } > +} > + > static int match_region(const void *key, const void *elt) > { > const unsigned int offset = (unsigned long)key; > diff --git a/xen/arch/arm/vgic/vgic-mmio.h b/xen/arch/arm/vgic/vgic-mmio.h > index 621b9a281c..2ddcbbf58d 100644 > --- a/xen/arch/arm/vgic/vgic-mmio.h > +++ b/xen/arch/arm/vgic/vgic-mmio.h > @@ -96,6 +96,17 @@ unsigned long vgic_mmio_read_rao(struct vcpu *vcpu, > void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr, > unsigned int len, unsigned long val); > > +unsigned long vgic_mmio_read_enable(struct vcpu *vcpu, > + paddr_t addr, unsigned int len); > + > +void vgic_mmio_write_senable(struct vcpu *vcpu, > + paddr_t addr, unsigned int len, > + unsigned long val); > + > +void vgic_mmio_write_cenable(struct vcpu *vcpu, > + paddr_t addr, unsigned int len, > + unsigned long val); > + > unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev); > > #endif > diff --git a/xen/arch/arm/vgic/vgic.c b/xen/arch/arm/vgic/vgic.c > index 465a95f415..5246d7c2e7 100644 > --- a/xen/arch/arm/vgic/vgic.c > +++ b/xen/arch/arm/vgic/vgic.c > @@ -698,6 +698,44 @@ void vgic_kick_vcpus(struct domain *d) > } > } > > +static unsigned int translate_irq_type(bool is_level) > +{ > + return is_level ? IRQ_TYPE_LEVEL_HIGH : IRQ_TYPE_EDGE_RISING; > +} > + > +void vgic_sync_hardware_irq(struct domain *d, > + irq_desc_t *desc, struct vgic_irq *irq) > +{ > + unsigned long flags; > + > + spin_lock_irqsave(&desc->lock, flags); > + spin_lock(&irq->irq_lock); > + > + /* Is that association actually still valid? (we entered with no locks) */ If the association is not valid, then you need to fetch the new desc. Right? > + if ( desc->irq == irq->hwintid ) > + { > + if ( irq->enabled ) > + { > + /* > + * We might end up from various callers, so check that the > + * interrrupt is disabled before trying to change the config. 
> + */ > + if ( irq_type_set_by_domain(d) && > + test_bit(_IRQ_DISABLED, &desc->status) ) > + gic_set_irq_type(desc, translate_irq_type(irq->config)); > + > + if ( irq->target_vcpu ) > + irq_set_affinity(desc, cpumask_of(irq->target_vcpu->processor)); > + desc->handler->enable(desc); > + } > + else > + desc->handler->disable(desc); > + } > + > + spin_unlock(&irq->irq_lock); > + spin_unlock_irqrestore(&desc->lock, flags); > +} > + > /* > * Local variables: > * mode: C > diff --git a/xen/arch/arm/vgic/vgic.h b/xen/arch/arm/vgic/vgic.h > index 588bd067b7..68e205d10a 100644 > --- a/xen/arch/arm/vgic/vgic.h > +++ b/xen/arch/arm/vgic/vgic.h > @@ -50,6 +50,9 @@ static inline void vgic_get_irq_kref(struct vgic_irq *irq) > atomic_inc(&irq->refcount); > } > > +void vgic_sync_hardware_irq(struct domain *d, > + irq_desc_t *desc, struct vgic_irq *irq); > + > void vgic_v2_fold_lr_state(struct vcpu *vcpu); > void vgic_v2_populate_lr(struct vcpu *vcpu, struct vgic_irq *irq, int lr); > void vgic_v2_set_underflow(struct vcpu *vcpu); > Cheers,
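To illustrate the two suggestions above, here is a minimal sketch of the vgic_mmio_write_senable() loop with desc initialised to NULL at its declaration (so the else branch goes away) and a comment spelling out why the hardware sync only runs after the irq_lock has been dropped. It reuses only the types and helpers introduced by the patch and is an illustration, not the final code:

void vgic_mmio_write_senable(struct vcpu *vcpu,
                             paddr_t addr, unsigned int len,
                             unsigned long val)
{
    uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
    unsigned int i;

    for_each_set_bit( i, &val, len * 8 )
    {
        struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
        irq_desc_t *desc = NULL;    /* re-initialised on every iteration */
        unsigned long flags;

        spin_lock_irqsave(&irq->irq_lock, flags);

        if ( irq->enabled )         /* skip already enabled IRQs */
        {
            spin_unlock_irqrestore(&irq->irq_lock, flags);
            vgic_put_irq(vcpu->domain, irq);
            continue;
        }

        irq->enabled = true;
        if ( irq->hw )
        {
            /* Only SPIs can be hardware mapped, never PPIs. */
            ASSERT(irq->hwintid >= VGIC_NR_PRIVATE_IRQS);
            desc = irq_to_desc(irq->hwintid);
        }

        vgic_queue_irq_unlock(vcpu->domain, irq, flags);

        /*
         * Sync the hardware side only after the irq_lock has been dropped
         * (by vgic_queue_irq_unlock() above): vgic_sync_hardware_irq()
         * takes desc->lock first and then re-takes irq_lock, which is the
         * required locking order.
         */
        if ( desc )
            vgic_sync_hardware_irq(vcpu->domain, desc, irq);

        vgic_put_irq(vcpu->domain, irq);
    }
}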
Hi, On 07/03/18 17:01, Julien Grall wrote: > Hi Andre, > > On 03/05/2018 04:03 PM, Andre Przywara wrote: >> As the enable register handlers are shared between the v2 and v3 >> emulation, their implementation goes into vgic-mmio.c, to be easily >> referenced from the v3 emulation as well later. >> This introduces a vgic_sync_hardware_irq() function, which updates the >> physical side of a hardware mapped virtual IRQ. >> Because the existing locking order between vgic_irq->irq_lock and >> irq_desc->lock dictates so, we dropu the irq_lock and retake them in the >> proper order. >> >> Signed-off-by: Andre Przywara <andre.przywara@linaro.org> >> --- >> Changelog RFC ... v1: >> - extend and move vgic_sync_hardware_irq() >> - do proper locking sequence >> - skip already disabled/enabled IRQs >> >> xen/arch/arm/vgic/vgic-mmio-v2.c | 4 +- >> xen/arch/arm/vgic/vgic-mmio.c | 117 >> +++++++++++++++++++++++++++++++++++++++ >> xen/arch/arm/vgic/vgic-mmio.h | 11 ++++ >> xen/arch/arm/vgic/vgic.c | 38 +++++++++++++ >> xen/arch/arm/vgic/vgic.h | 3 + >> 5 files changed, 171 insertions(+), 2 deletions(-) >> >> diff --git a/xen/arch/arm/vgic/vgic-mmio-v2.c >> b/xen/arch/arm/vgic/vgic-mmio-v2.c >> index 2e015ed0b1..3dd983f885 100644 >> --- a/xen/arch/arm/vgic/vgic-mmio-v2.c >> +++ b/xen/arch/arm/vgic/vgic-mmio-v2.c >> @@ -80,10 +80,10 @@ static const struct vgic_register_region >> vgic_v2_dist_registers[] = { >> vgic_mmio_read_rao, vgic_mmio_write_wi, 1, >> VGIC_ACCESS_32bit), >> REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISENABLER, >> - vgic_mmio_read_raz, vgic_mmio_write_wi, 1, >> + vgic_mmio_read_enable, vgic_mmio_write_senable, 1, >> VGIC_ACCESS_32bit), >> REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICENABLER, >> - vgic_mmio_read_raz, vgic_mmio_write_wi, 1, >> + vgic_mmio_read_enable, vgic_mmio_write_cenable, 1, >> VGIC_ACCESS_32bit), >> REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISPENDR, >> vgic_mmio_read_raz, vgic_mmio_write_wi, 1, >> diff --git a/xen/arch/arm/vgic/vgic-mmio.c >> b/xen/arch/arm/vgic/vgic-mmio.c >> index 284a92d288..f8f0252eff 100644 >> --- a/xen/arch/arm/vgic/vgic-mmio.c >> +++ b/xen/arch/arm/vgic/vgic-mmio.c >> @@ -39,6 +39,123 @@ void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t >> addr, >> /* Ignore */ >> } >> +/* >> + * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the >> value >> + * of the enabled bit, so there is only one function for both here. 
>> + */ >> +unsigned long vgic_mmio_read_enable(struct vcpu *vcpu, >> + paddr_t addr, unsigned int len) >> +{ >> + uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); >> + uint32_t value = 0; >> + unsigned int i; >> + >> + /* Loop over all IRQs affected by this read */ >> + for ( i = 0; i < len * 8; i++ ) >> + { >> + struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid >> + i); >> + >> + if ( irq->enabled ) >> + value |= (1U << i); >> + >> + vgic_put_irq(vcpu->domain, irq); >> + } >> + >> + return value; >> +} >> + >> +void vgic_mmio_write_senable(struct vcpu *vcpu, >> + paddr_t addr, unsigned int len, >> + unsigned long val) >> +{ >> + uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); >> + unsigned int i; >> + >> + for_each_set_bit( i, &val, len * 8 ) >> + { >> + struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid >> + i); >> + unsigned long flags; >> + irq_desc_t *desc; >> + >> + spin_lock_irqsave(&irq->irq_lock, flags); >> + >> + if ( irq->enabled ) /* skip already enabled IRQs */ >> + { >> + spin_unlock_irqrestore(&irq->irq_lock, flags); >> + vgic_put_irq(vcpu->domain, irq); >> + continue; >> + } >> + >> + irq->enabled = true; >> + if ( irq->hw ) >> + { >> + /* >> + * The irq cannot be a PPI, we only support delivery >> + * of SPIs to guests. >> + */ >> + ASSERT(irq->hwintid >= VGIC_NR_PRIVATE_IRQS); >> + >> + desc = irq_to_desc(irq->hwintid); >> + } >> + else >> + desc = NULL; > > You could just initialize desc to NULL at the declaration time and drop > the else part. Can we rely on the initializer to be called on every loop iteration? I wasn't sure about this and what the standard has to say about this. >> + >> + vgic_queue_irq_unlock(vcpu->domain, irq, flags); >> + >> + if ( desc ) >> + vgic_sync_hardware_irq(vcpu->domain, desc, irq); > > A comment explaining why desc is done outside the locking would be > useful. This would avoid to loose time using git blame. > >> + >> + vgic_put_irq(vcpu->domain, irq); >> + } >> +} >> + >> +void vgic_mmio_write_cenable(struct vcpu *vcpu, >> + paddr_t addr, unsigned int len, >> + unsigned long val) >> +{ >> + uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); >> + unsigned int i; >> + >> + for_each_set_bit( i, &val, len * 8 ) >> + { >> + struct vgic_irq *irq; >> + unsigned long flags; >> + irq_desc_t *desc; >> + >> + irq = vgic_get_irq(vcpu->domain, vcpu, intid + i); >> + spin_lock_irqsave(&irq->irq_lock, flags); >> + >> + if ( !irq->enabled ) /* skip already disabled IRQs */ >> + { >> + spin_unlock_irqrestore(&irq->irq_lock, flags); >> + vgic_put_irq(vcpu->domain, irq); >> + continue; >> + } >> + >> + irq->enabled = false; >> + >> + if ( irq->hw ) >> + { >> + /* >> + * The irq cannot be a PPI, we only support delivery >> + * of SPIs to guests. >> + */ >> + ASSERT(irq->hwintid >= VGIC_NR_PRIVATE_IRQS); >> + >> + desc = irq_to_desc(irq->hwintid); >> + } >> + else >> + desc = NULL; >> + >> + spin_unlock_irqrestore(&irq->irq_lock, flags); >> + >> + if ( desc ) >> + vgic_sync_hardware_irq(vcpu->domain, desc, irq); > > Ditto. 
> >> + >> + vgic_put_irq(vcpu->domain, irq); >> + } >> +} >> + >> static int match_region(const void *key, const void *elt) >> { >> const unsigned int offset = (unsigned long)key; >> diff --git a/xen/arch/arm/vgic/vgic-mmio.h >> b/xen/arch/arm/vgic/vgic-mmio.h >> index 621b9a281c..2ddcbbf58d 100644 >> --- a/xen/arch/arm/vgic/vgic-mmio.h >> +++ b/xen/arch/arm/vgic/vgic-mmio.h >> @@ -96,6 +96,17 @@ unsigned long vgic_mmio_read_rao(struct vcpu *vcpu, >> void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr, >> unsigned int len, unsigned long val); >> +unsigned long vgic_mmio_read_enable(struct vcpu *vcpu, >> + paddr_t addr, unsigned int len); >> + >> +void vgic_mmio_write_senable(struct vcpu *vcpu, >> + paddr_t addr, unsigned int len, >> + unsigned long val); >> + >> +void vgic_mmio_write_cenable(struct vcpu *vcpu, >> + paddr_t addr, unsigned int len, >> + unsigned long val); >> + >> unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev); >> #endif >> diff --git a/xen/arch/arm/vgic/vgic.c b/xen/arch/arm/vgic/vgic.c >> index 465a95f415..5246d7c2e7 100644 >> --- a/xen/arch/arm/vgic/vgic.c >> +++ b/xen/arch/arm/vgic/vgic.c >> @@ -698,6 +698,44 @@ void vgic_kick_vcpus(struct domain *d) >> } >> } >> +static unsigned int translate_irq_type(bool is_level) >> +{ >> + return is_level ? IRQ_TYPE_LEVEL_HIGH : IRQ_TYPE_EDGE_RISING; >> +} >> + >> +void vgic_sync_hardware_irq(struct domain *d, >> + irq_desc_t *desc, struct vgic_irq *irq) >> +{ >> + unsigned long flags; >> + >> + spin_lock_irqsave(&desc->lock, flags); >> + spin_lock(&irq->irq_lock); >> + >> + /* Is that association actually still valid? (we entered with no >> locks) */ > > If the association is not valid, then you need to fetch the new desc. > Right? I am not so sure it's that easy. If the association changed, then the whole reason of this call might have become invalid. So I rather bail out here and do nothing. The check is just to prevent doing the wrong thing, not necessarily to always do the right thing. To be honest this whole "lock drop dance" is just to cope with the locking order, which I consider wrong, according to my gut feeling. This function here is called from several places, so it seems a bit fragile to assume a way how to fix a broken association here. I can go back and check every existing caller in this respect, but to be honest I'd rather change the locking order, so we don't need to worry about this. But I feel like we should do this as a fixup on top later. Cheers, Andre. > >> + if ( desc->irq == irq->hwintid ) >> + { >> + if ( irq->enabled ) >> + { >> + /* >> + * We might end up from various callers, so check that the >> + * interrrupt is disabled before trying to change the >> config. 
>> + */ >> + if ( irq_type_set_by_domain(d) && >> + test_bit(_IRQ_DISABLED, &desc->status) ) >> + gic_set_irq_type(desc, translate_irq_type(irq->config)); >> + >> + if ( irq->target_vcpu ) >> + irq_set_affinity(desc, >> cpumask_of(irq->target_vcpu->processor)); >> + desc->handler->enable(desc); >> + } >> + else >> + desc->handler->disable(desc); >> + } >> + >> + spin_unlock(&irq->irq_lock); >> + spin_unlock_irqrestore(&desc->lock, flags); >> +} >> + >> /* >> * Local variables: >> * mode: C >> diff --git a/xen/arch/arm/vgic/vgic.h b/xen/arch/arm/vgic/vgic.h >> index 588bd067b7..68e205d10a 100644 >> --- a/xen/arch/arm/vgic/vgic.h >> +++ b/xen/arch/arm/vgic/vgic.h >> @@ -50,6 +50,9 @@ static inline void vgic_get_irq_kref(struct vgic_irq >> *irq) >> atomic_inc(&irq->refcount); >> } >> +void vgic_sync_hardware_irq(struct domain *d, >> + irq_desc_t *desc, struct vgic_irq *irq); >> + >> void vgic_v2_fold_lr_state(struct vcpu *vcpu); >> void vgic_v2_populate_lr(struct vcpu *vcpu, struct vgic_irq *irq, >> int lr); >> void vgic_v2_set_underflow(struct vcpu *vcpu); >> > > Cheers, >
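On the scoping question raised above: C gives an automatic variable declared inside the loop body a fresh initialisation every time the block is entered, so an irq_desc_t *desc = NULL; inside the for_each_set_bit() body would indeed start out as NULL on each iteration. A small stand-alone toy program (independent of the Xen sources) showing the behaviour:

#include <stdio.h>

int main(void)
{
    for ( int i = 0; i < 3; i++ )
    {
        const char *desc = NULL;   /* initialiser runs on every iteration */

        if ( i == 0 )
            desc = "assigned in iteration 0";

        /* Prints the assigned string once, then "(null)" twice. */
        printf("iteration %d: %s\n", i, desc ? desc : "(null)");
    }

    return 0;
}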
(sorry for the formatting) On Wed, 7 Mar 2018, 18:23 Andre Przywara, <andre.przywara@linaro.org> wrote: > Hi, > > On 07/03/18 17:01, Julien Grall wrote: > > Hi Andre, > > > > On 03/05/2018 04:03 PM, Andre Przywara wrote: > >> As the enable register handlers are shared between the v2 and v3 > >> emulation, their implementation goes into vgic-mmio.c, to be easily > >> referenced from the v3 emulation as well later. > >> This introduces a vgic_sync_hardware_irq() function, which updates the > >> physical side of a hardware mapped virtual IRQ. > >> Because the existing locking order between vgic_irq->irq_lock and > >> irq_desc->lock dictates so, we dropu the irq_lock and retake them in the > >> proper order. > >> > >> Signed-off-by: Andre Przywara <andre.przywara@linaro.org> > >> --- > >> Changelog RFC ... v1: > >> - extend and move vgic_sync_hardware_irq() > >> - do proper locking sequence > >> - skip already disabled/enabled IRQs > >> > >> xen/arch/arm/vgic/vgic-mmio-v2.c | 4 +- > >> xen/arch/arm/vgic/vgic-mmio.c | 117 > >> +++++++++++++++++++++++++++++++++++++++ > >> xen/arch/arm/vgic/vgic-mmio.h | 11 ++++ > >> xen/arch/arm/vgic/vgic.c | 38 +++++++++++++ > >> xen/arch/arm/vgic/vgic.h | 3 + > >> 5 files changed, 171 insertions(+), 2 deletions(-) > >> > >> diff --git a/xen/arch/arm/vgic/vgic-mmio-v2.c > >> b/xen/arch/arm/vgic/vgic-mmio-v2.c > >> index 2e015ed0b1..3dd983f885 100644 > >> --- a/xen/arch/arm/vgic/vgic-mmio-v2.c > >> +++ b/xen/arch/arm/vgic/vgic-mmio-v2.c > >> @@ -80,10 +80,10 @@ static const struct vgic_register_region > >> vgic_v2_dist_registers[] = { > >> vgic_mmio_read_rao, vgic_mmio_write_wi, 1, > >> VGIC_ACCESS_32bit), > >> REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISENABLER, > >> - vgic_mmio_read_raz, vgic_mmio_write_wi, 1, > >> + vgic_mmio_read_enable, vgic_mmio_write_senable, 1, > >> VGIC_ACCESS_32bit), > >> REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICENABLER, > >> - vgic_mmio_read_raz, vgic_mmio_write_wi, 1, > >> + vgic_mmio_read_enable, vgic_mmio_write_cenable, 1, > >> VGIC_ACCESS_32bit), > >> REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISPENDR, > >> vgic_mmio_read_raz, vgic_mmio_write_wi, 1, > >> diff --git a/xen/arch/arm/vgic/vgic-mmio.c > >> b/xen/arch/arm/vgic/vgic-mmio.c > >> index 284a92d288..f8f0252eff 100644 > >> --- a/xen/arch/arm/vgic/vgic-mmio.c > >> +++ b/xen/arch/arm/vgic/vgic-mmio.c > >> @@ -39,6 +39,123 @@ void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t > >> addr, > >> /* Ignore */ > >> } > >> +/* > >> + * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the > >> value > >> + * of the enabled bit, so there is only one function for both here. 
> >> + */ > >> +unsigned long vgic_mmio_read_enable(struct vcpu *vcpu, > >> + paddr_t addr, unsigned int len) > >> +{ > >> + uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); > >> + uint32_t value = 0; > >> + unsigned int i; > >> + > >> + /* Loop over all IRQs affected by this read */ > >> + for ( i = 0; i < len * 8; i++ ) > >> + { > >> + struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid > >> + i); > >> + > >> + if ( irq->enabled ) > >> + value |= (1U << i); > >> + > >> + vgic_put_irq(vcpu->domain, irq); > >> + } > >> + > >> + return value; > >> +} > >> + > >> +void vgic_mmio_write_senable(struct vcpu *vcpu, > >> + paddr_t addr, unsigned int len, > >> + unsigned long val) > >> +{ > >> + uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); > >> + unsigned int i; > >> + > >> + for_each_set_bit( i, &val, len * 8 ) > >> + { > >> + struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid > >> + i); > >> + unsigned long flags; > >> + irq_desc_t *desc; > >> + > >> + spin_lock_irqsave(&irq->irq_lock, flags); > >> + > >> + if ( irq->enabled ) /* skip already enabled IRQs */ > >> + { > >> + spin_unlock_irqrestore(&irq->irq_lock, flags); > >> + vgic_put_irq(vcpu->domain, irq); > >> + continue; > >> + } > >> + > >> + irq->enabled = true; > >> + if ( irq->hw ) > >> + { > >> + /* > >> + * The irq cannot be a PPI, we only support delivery > >> + * of SPIs to guests. > >> + */ > >> + ASSERT(irq->hwintid >= VGIC_NR_PRIVATE_IRQS); > >> + > >> + desc = irq_to_desc(irq->hwintid); > >> + } > >> + else > >> + desc = NULL; > > > > You could just initialize desc to NULL at the declaration time and drop > > the else part. > > Can we rely on the initializer to be called on every loop iteration? I > wasn't sure about this and what the standard has to say about this. > Every loop is a new scope. So everything declared within that scope is initialized again. We do use that extensively in Xen. > >> + > >> + vgic_queue_irq_unlock(vcpu->domain, irq, flags); > >> + > >> + if ( desc ) > >> + vgic_sync_hardware_irq(vcpu->domain, desc, irq); > > > > A comment explaining why desc is done outside the locking would be > > useful. This would avoid to loose time using git blame. > > > >> + > >> + vgic_put_irq(vcpu->domain, irq); > >> + } > >> +} > >> + > >> +void vgic_mmio_write_cenable(struct vcpu *vcpu, > >> + paddr_t addr, unsigned int len, > >> + unsigned long val) > >> +{ > >> + uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); > >> + unsigned int i; > >> + > >> + for_each_set_bit( i, &val, len * 8 ) > >> + { > >> + struct vgic_irq *irq; > >> + unsigned long flags; > >> + irq_desc_t *desc; > >> + > >> + irq = vgic_get_irq(vcpu->domain, vcpu, intid + i); > >> + spin_lock_irqsave(&irq->irq_lock, flags); > >> + > >> + if ( !irq->enabled ) /* skip already disabled IRQs > */ > >> + { > >> + spin_unlock_irqrestore(&irq->irq_lock, flags); > >> + vgic_put_irq(vcpu->domain, irq); > >> + continue; > >> + } > >> + > >> + irq->enabled = false; > >> + > >> + if ( irq->hw ) > >> + { > >> + /* > >> + * The irq cannot be a PPI, we only support delivery > >> + * of SPIs to guests. > >> + */ > >> + ASSERT(irq->hwintid >= VGIC_NR_PRIVATE_IRQS); > >> + > >> + desc = irq_to_desc(irq->hwintid); > >> + } > >> + else > >> + desc = NULL; > >> + > >> + spin_unlock_irqrestore(&irq->irq_lock, flags); > >> + > >> + if ( desc ) > >> + vgic_sync_hardware_irq(vcpu->domain, desc, irq); > > > > Ditto. 
> > > >> + > >> + vgic_put_irq(vcpu->domain, irq); > >> + } > >> +} > >> + > >> static int match_region(const void *key, const void *elt) > >> { > >> const unsigned int offset = (unsigned long)key; > >> diff --git a/xen/arch/arm/vgic/vgic-mmio.h > >> b/xen/arch/arm/vgic/vgic-mmio.h > >> index 621b9a281c..2ddcbbf58d 100644 > >> --- a/xen/arch/arm/vgic/vgic-mmio.h > >> +++ b/xen/arch/arm/vgic/vgic-mmio.h > >> @@ -96,6 +96,17 @@ unsigned long vgic_mmio_read_rao(struct vcpu *vcpu, > >> void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr, > >> unsigned int len, unsigned long val); > >> +unsigned long vgic_mmio_read_enable(struct vcpu *vcpu, > >> + paddr_t addr, unsigned int len); > >> + > >> +void vgic_mmio_write_senable(struct vcpu *vcpu, > >> + paddr_t addr, unsigned int len, > >> + unsigned long val); > >> + > >> +void vgic_mmio_write_cenable(struct vcpu *vcpu, > >> + paddr_t addr, unsigned int len, > >> + unsigned long val); > >> + > >> unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev); > >> #endif > >> diff --git a/xen/arch/arm/vgic/vgic.c b/xen/arch/arm/vgic/vgic.c > >> index 465a95f415..5246d7c2e7 100644 > >> --- a/xen/arch/arm/vgic/vgic.c > >> +++ b/xen/arch/arm/vgic/vgic.c > >> @@ -698,6 +698,44 @@ void vgic_kick_vcpus(struct domain *d) > >> } > >> } > >> +static unsigned int translate_irq_type(bool is_level) > >> +{ > >> + return is_level ? IRQ_TYPE_LEVEL_HIGH : IRQ_TYPE_EDGE_RISING; > >> +} > >> + > >> +void vgic_sync_hardware_irq(struct domain *d, > >> + irq_desc_t *desc, struct vgic_irq *irq) > >> +{ > >> + unsigned long flags; > >> + > >> + spin_lock_irqsave(&desc->lock, flags); > >> + spin_lock(&irq->irq_lock); > >> + > >> + /* Is that association actually still valid? (we entered with no > >> locks) */ > > > > If the association is not valid, then you need to fetch the new desc. > > Right? > > I am not so sure it's that easy. If the association changed, then the > whole reason of this call might have become invalid. So I rather bail > out here and do nothing. The check is just to prevent doing the wrong > thing, not necessarily to always do the right thing. > To be honest this whole "lock drop dance" is just to cope with the > locking order, which I consider wrong, according to my gut feeling. > If you don't do the dance here, you would have to do in other place. I still think taking the desc->lock first is the right thing to do as Xen deal with physical first then it might be a virtual (so second) or handled by a driver. > This function here is called from several places, so it seems a bit > fragile to assume a way how to fix a broken association here. I can go > back and check every existing caller in this respect, but to be honest > I'd rather change the locking order, so we don't need to worry about > this. But I feel like we should do this as a fixup on top later. > See some thought in the next patch. We might be able to simplify the whole logic by forbidding the interrupt to be removed. > Cheers, > Andre. > > > > > >> + if ( desc->irq == irq->hwintid ) > >> + { > >> + if ( irq->enabled ) > >> + { > >> + /* > >> + * We might end up from various callers, so check that the > >> + * interrrupt is disabled before trying to change the > >> config. 
> >> + */ > >> + if ( irq_type_set_by_domain(d) && > >> + test_bit(_IRQ_DISABLED, &desc->status) ) > >> + gic_set_irq_type(desc, > translate_irq_type(irq->config)); > >> + > >> + if ( irq->target_vcpu ) > >> + irq_set_affinity(desc, > >> cpumask_of(irq->target_vcpu->processor)); > >> + desc->handler->enable(desc); > >> + } > >> + else > >> + desc->handler->disable(desc); > >> + } > >> + > >> + spin_unlock(&irq->irq_lock); > >> + spin_unlock_irqrestore(&desc->lock, flags); > >> +} > >> + > >> /* > >> * Local variables: > >> * mode: C > >> diff --git a/xen/arch/arm/vgic/vgic.h b/xen/arch/arm/vgic/vgic.h > >> index 588bd067b7..68e205d10a 100644 > >> --- a/xen/arch/arm/vgic/vgic.h > >> +++ b/xen/arch/arm/vgic/vgic.h > >> @@ -50,6 +50,9 @@ static inline void vgic_get_irq_kref(struct vgic_irq > >> *irq) > >> atomic_inc(&irq->refcount); > >> } > >> +void vgic_sync_hardware_irq(struct domain *d, > >> + irq_desc_t *desc, struct vgic_irq *irq); > >> + > >> void vgic_v2_fold_lr_state(struct vcpu *vcpu); > >> void vgic_v2_populate_lr(struct vcpu *vcpu, struct vgic_irq *irq, > >> int lr); > >> void vgic_v2_set_underflow(struct vcpu *vcpu); > >> > > > > Cheers, > >
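To make the locking-order point concrete, here is a schematic (the function names are invented for the illustration) of why a caller that already holds irq_lock has to drop it before calling vgic_sync_hardware_irq(): with the convention that the physical desc->lock nests outside the virtual irq_lock, taking the locks in the opposite order on another path risks an ABBA deadlock.

/* Path A follows the convention, e.g. vgic_sync_hardware_irq(). */
static void path_following_convention(irq_desc_t *desc, struct vgic_irq *irq)
{
    unsigned long flags;

    spin_lock_irqsave(&desc->lock, flags);   /* physical side first */
    spin_lock(&irq->irq_lock);               /* then the virtual side */
    /* ... update physical and virtual state ... */
    spin_unlock(&irq->irq_lock);
    spin_unlock_irqrestore(&desc->lock, flags);
}

/*
 * Path B would violate the convention: run concurrently with path A,
 * each CPU can end up holding one lock while waiting for the other.
 * This is why the MMIO handlers compute desc under irq_lock, drop
 * irq_lock, and only then call vgic_sync_hardware_irq().
 */
static void path_violating_convention(irq_desc_t *desc, struct vgic_irq *irq)
{
    unsigned long flags;

    spin_lock_irqsave(&irq->irq_lock, flags);
    spin_lock(&desc->lock);                  /* wrong nesting order */
    /* ... */
    spin_unlock(&desc->lock);
    spin_unlock_irqrestore(&irq->irq_lock, flags);
}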
diff --git a/xen/arch/arm/vgic/vgic-mmio-v2.c b/xen/arch/arm/vgic/vgic-mmio-v2.c index 2e015ed0b1..3dd983f885 100644 --- a/xen/arch/arm/vgic/vgic-mmio-v2.c +++ b/xen/arch/arm/vgic/vgic-mmio-v2.c @@ -80,10 +80,10 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = { vgic_mmio_read_rao, vgic_mmio_write_wi, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISENABLER, - vgic_mmio_read_raz, vgic_mmio_write_wi, 1, + vgic_mmio_read_enable, vgic_mmio_write_senable, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICENABLER, - vgic_mmio_read_raz, vgic_mmio_write_wi, 1, + vgic_mmio_read_enable, vgic_mmio_write_cenable, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISPENDR, vgic_mmio_read_raz, vgic_mmio_write_wi, 1, diff --git a/xen/arch/arm/vgic/vgic-mmio.c b/xen/arch/arm/vgic/vgic-mmio.c index 284a92d288..f8f0252eff 100644 --- a/xen/arch/arm/vgic/vgic-mmio.c +++ b/xen/arch/arm/vgic/vgic-mmio.c @@ -39,6 +39,123 @@ void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr, /* Ignore */ } +/* + * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value + * of the enabled bit, so there is only one function for both here. + */ +unsigned long vgic_mmio_read_enable(struct vcpu *vcpu, + paddr_t addr, unsigned int len) +{ + uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); + uint32_t value = 0; + unsigned int i; + + /* Loop over all IRQs affected by this read */ + for ( i = 0; i < len * 8; i++ ) + { + struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i); + + if ( irq->enabled ) + value |= (1U << i); + + vgic_put_irq(vcpu->domain, irq); + } + + return value; +} + +void vgic_mmio_write_senable(struct vcpu *vcpu, + paddr_t addr, unsigned int len, + unsigned long val) +{ + uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); + unsigned int i; + + for_each_set_bit( i, &val, len * 8 ) + { + struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i); + unsigned long flags; + irq_desc_t *desc; + + spin_lock_irqsave(&irq->irq_lock, flags); + + if ( irq->enabled ) /* skip already enabled IRQs */ + { + spin_unlock_irqrestore(&irq->irq_lock, flags); + vgic_put_irq(vcpu->domain, irq); + continue; + } + + irq->enabled = true; + if ( irq->hw ) + { + /* + * The irq cannot be a PPI, we only support delivery + * of SPIs to guests. + */ + ASSERT(irq->hwintid >= VGIC_NR_PRIVATE_IRQS); + + desc = irq_to_desc(irq->hwintid); + } + else + desc = NULL; + + vgic_queue_irq_unlock(vcpu->domain, irq, flags); + + if ( desc ) + vgic_sync_hardware_irq(vcpu->domain, desc, irq); + + vgic_put_irq(vcpu->domain, irq); + } +} + +void vgic_mmio_write_cenable(struct vcpu *vcpu, + paddr_t addr, unsigned int len, + unsigned long val) +{ + uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1); + unsigned int i; + + for_each_set_bit( i, &val, len * 8 ) + { + struct vgic_irq *irq; + unsigned long flags; + irq_desc_t *desc; + + irq = vgic_get_irq(vcpu->domain, vcpu, intid + i); + spin_lock_irqsave(&irq->irq_lock, flags); + + if ( !irq->enabled ) /* skip already disabled IRQs */ + { + spin_unlock_irqrestore(&irq->irq_lock, flags); + vgic_put_irq(vcpu->domain, irq); + continue; + } + + irq->enabled = false; + + if ( irq->hw ) + { + /* + * The irq cannot be a PPI, we only support delivery + * of SPIs to guests. 
+ */ + ASSERT(irq->hwintid >= VGIC_NR_PRIVATE_IRQS); + + desc = irq_to_desc(irq->hwintid); + } + else + desc = NULL; + + spin_unlock_irqrestore(&irq->irq_lock, flags); + + if ( desc ) + vgic_sync_hardware_irq(vcpu->domain, desc, irq); + + vgic_put_irq(vcpu->domain, irq); + } +} + static int match_region(const void *key, const void *elt) { const unsigned int offset = (unsigned long)key; diff --git a/xen/arch/arm/vgic/vgic-mmio.h b/xen/arch/arm/vgic/vgic-mmio.h index 621b9a281c..2ddcbbf58d 100644 --- a/xen/arch/arm/vgic/vgic-mmio.h +++ b/xen/arch/arm/vgic/vgic-mmio.h @@ -96,6 +96,17 @@ unsigned long vgic_mmio_read_rao(struct vcpu *vcpu, void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr, unsigned int len, unsigned long val); +unsigned long vgic_mmio_read_enable(struct vcpu *vcpu, + paddr_t addr, unsigned int len); + +void vgic_mmio_write_senable(struct vcpu *vcpu, + paddr_t addr, unsigned int len, + unsigned long val); + +void vgic_mmio_write_cenable(struct vcpu *vcpu, + paddr_t addr, unsigned int len, + unsigned long val); + unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev); #endif diff --git a/xen/arch/arm/vgic/vgic.c b/xen/arch/arm/vgic/vgic.c index 465a95f415..5246d7c2e7 100644 --- a/xen/arch/arm/vgic/vgic.c +++ b/xen/arch/arm/vgic/vgic.c @@ -698,6 +698,44 @@ void vgic_kick_vcpus(struct domain *d) } } +static unsigned int translate_irq_type(bool is_level) +{ + return is_level ? IRQ_TYPE_LEVEL_HIGH : IRQ_TYPE_EDGE_RISING; +} + +void vgic_sync_hardware_irq(struct domain *d, + irq_desc_t *desc, struct vgic_irq *irq) +{ + unsigned long flags; + + spin_lock_irqsave(&desc->lock, flags); + spin_lock(&irq->irq_lock); + + /* Is that association actually still valid? (we entered with no locks) */ + if ( desc->irq == irq->hwintid ) + { + if ( irq->enabled ) + { + /* + * We might end up from various callers, so check that the + * interrrupt is disabled before trying to change the config. + */ + if ( irq_type_set_by_domain(d) && + test_bit(_IRQ_DISABLED, &desc->status) ) + gic_set_irq_type(desc, translate_irq_type(irq->config)); + + if ( irq->target_vcpu ) + irq_set_affinity(desc, cpumask_of(irq->target_vcpu->processor)); + desc->handler->enable(desc); + } + else + desc->handler->disable(desc); + } + + spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&desc->lock, flags); +} + /* * Local variables: * mode: C diff --git a/xen/arch/arm/vgic/vgic.h b/xen/arch/arm/vgic/vgic.h index 588bd067b7..68e205d10a 100644 --- a/xen/arch/arm/vgic/vgic.h +++ b/xen/arch/arm/vgic/vgic.h @@ -50,6 +50,9 @@ static inline void vgic_get_irq_kref(struct vgic_irq *irq) atomic_inc(&irq->refcount); } +void vgic_sync_hardware_irq(struct domain *d, + irq_desc_t *desc, struct vgic_irq *irq); + void vgic_v2_fold_lr_state(struct vcpu *vcpu); void vgic_v2_populate_lr(struct vcpu *vcpu, struct vgic_irq *irq, int lr); void vgic_v2_set_underflow(struct vcpu *vcpu);
As the enable register handlers are shared between the v2 and v3 emulation, their implementation goes into vgic-mmio.c, to be easily referenced from the v3 emulation as well later.
This introduces a vgic_sync_hardware_irq() function, which updates the physical side of a hardware mapped virtual IRQ.
Because the existing locking order between vgic_irq->irq_lock and irq_desc->lock dictates so, we drop the irq_lock and retake them in the proper order.

Signed-off-by: Andre Przywara <andre.przywara@linaro.org>
---
Changelog RFC ... v1:
- extend and move vgic_sync_hardware_irq()
- do proper locking sequence
- skip already disabled/enabled IRQs

 xen/arch/arm/vgic/vgic-mmio-v2.c | 4 +-
 xen/arch/arm/vgic/vgic-mmio.c | 117 +++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/vgic/vgic-mmio.h | 11 ++++
 xen/arch/arm/vgic/vgic.c | 38 +++++++++++++
 xen/arch/arm/vgic/vgic.h | 3 +
 5 files changed, 171 insertions(+), 2 deletions(-)
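For context on what the new handlers emulate: GICD_ISENABLERn and GICD_ICENABLERn hold one enable bit per interrupt; writing 1 to a bit enables (set-enable) or disables (clear-enable) that interrupt, written 0 bits are ignored, and reads of either register return the current enable state. Below is a rough guest-side sketch of the access pattern, using the architectural GICv2 distributor offsets; the mmio_write32() helper and the gicd base pointer are placeholders for whatever the guest environment provides.

#include <stdint.h>

/* GICv2 distributor register offsets (one enable bit per INTID). */
#define GICD_ISENABLER(n)   (0x100 + 4 * (n))   /* write 1 to enable  */
#define GICD_ICENABLER(n)   (0x180 + 4 * (n))   /* write 1 to disable */

/* Placeholder MMIO write helper for the sketch. */
static inline void mmio_write32(volatile void *addr, uint32_t val)
{
    *(volatile uint32_t *)addr = val;
}

/* Enable an SPI: set bit (intid % 32) in enable register (intid / 32). */
static void guest_enable_spi(volatile uint8_t *gicd, unsigned int intid)
{
    mmio_write32(gicd + GICD_ISENABLER(intid / 32), 1U << (intid % 32));
}

/* Disable it again via the clear-enable register; 0 bits are ignored. */
static void guest_disable_spi(volatile uint8_t *gicd, unsigned int intid)
{
    mmio_write32(gicd + GICD_ICENABLER(intid / 32), 1U << (intid % 32));
}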