@@ -220,14 +220,19 @@ static hw_irq_controller gic_guest_irq_type = {
/*
* - needs to be called with a valid cpu_mask, ie each cpu in the mask has
* already called gic_cpu_init
+ * - desc.lock must be held
*/
-static void gic_set_irq_properties(unsigned int irq, bool_t level,
+static void gic_set_irq_properties(struct irq_desc *desc,
const cpumask_t *cpu_mask,
unsigned int priority)
{
volatile unsigned char *bytereg;
uint32_t cfg, edgebit;
unsigned int mask;
+ unsigned int irq = desc->irq;
+ unsigned int type = desc->arch.type;
+
+ ASSERT(spin_is_locked(&desc->lock));
spin_lock(&gic.lock);
@@ -236,9 +241,9 @@ static void gic_set_irq_properties(unsigned int irq, bool_t level,
/* Set edge / level */
cfg = GICD[GICD_ICFGR + irq / 16];
edgebit = 2u << (2 * (irq % 16));
- if ( level )
+ if ( type & DT_IRQ_TYPE_LEVEL_MASK )
cfg &= ~edgebit;
- else
+ else if ( type & DT_IRQ_TYPE_EDGE_BOTH )
cfg |= edgebit;
GICD[GICD_ICFGR + irq / 16] = cfg;
@@ -256,8 +261,8 @@ static void gic_set_irq_properties(unsigned int irq, bool_t level,
/* Program the GIC to route an interrupt to the host (i.e. Xen)
* - needs to be called with desc.lock held
*/
-void gic_route_irq_to_xen(struct irq_desc *desc, bool_t level,
- const cpumask_t *cpu_mask, unsigned int priority)
+void gic_route_irq_to_xen(struct irq_desc *desc, const cpumask_t *cpu_mask,
+ unsigned int priority)
{
ASSERT(priority <= 0xff); /* Only 8 bits of priority */
ASSERT(desc->irq < gic.lines);/* Can't route interrupts that don't exist */
@@ -266,15 +271,14 @@ void gic_route_irq_to_xen(struct irq_desc *desc, bool_t level,
desc->handler = &gic_host_irq_type;
- gic_set_irq_properties(desc->irq, level, cpu_mask, priority);
+ gic_set_irq_properties(desc, cpu_mask, priority);
}
/* Program the GIC to route an interrupt to a guest
* - desc.lock must be held
*/
void gic_route_irq_to_guest(struct domain *d, struct irq_desc *desc,
- bool_t level, const cpumask_t *cpu_mask,
- unsigned int priority)
+ const cpumask_t *cpu_mask, unsigned int priority)
{
struct pending_irq *p;
ASSERT(spin_is_locked(&desc->lock));
@@ -282,8 +286,7 @@ void gic_route_irq_to_guest(struct domain *d, struct irq_desc *desc,
desc->handler = &gic_guest_irq_type;
desc->status |= IRQ_GUEST;
- gic_set_irq_properties(desc->irq, level, cpumask_of(smp_processor_id()),
- GIC_PRI_IRQ);
+ gic_set_irq_properties(desc, cpumask_of(smp_processor_id()), GIC_PRI_IRQ);
/* TODO: do not assume delivery to vcpu0 */
p = irq_to_pending(d->vcpu[0], desc->irq);
@@ -55,6 +55,7 @@ irq_desc_t *__irq_to_desc(int irq)
int __init arch_init_one_irq_desc(struct irq_desc *desc)
{
+ desc->arch.type = DT_IRQ_TYPE_NONE;
return 0;
}
@@ -82,6 +83,12 @@ static int __cpuinit init_local_irq_data(void)
init_one_irq_desc(desc);
desc->irq = irq;
desc->action = NULL;
+
+ /* PPIs are included in local_irqs, so we have to copy the IRQ type
+ * from CPU0, otherwise we may miss the type if it has been set
+ * early.
+ */
+ desc->arch.type = per_cpu(local_irq_desc, 0)[irq].arch.type;
}
return 0;
@@ -272,9 +279,6 @@ int setup_dt_irq(const struct dt_irq *irq, struct irqaction *new)
/* First time the IRQ is setup */
if ( disabled )
{
- bool_t level;
-
- level = dt_irq_is_level_triggered(irq);
/* It's fine to use smp_processor_id() because:
* For PPI: irq_desc is banked
* For SPI: we don't care for now which CPU will receive the
@@ -282,7 +286,8 @@ int setup_dt_irq(const struct dt_irq *irq, struct irqaction *new)
* TODO: Handle case where SPI is setup on different CPU than
* the targeted CPU and the priority.
*/
- gic_route_irq_to_xen(desc, level, cpumask_of(smp_processor_id()),
+ desc->arch.type = irq->type;
+ gic_route_irq_to_xen(desc, cpumask_of(smp_processor_id()),
GIC_PRI_IRQ);
desc->handler->startup(desc);
}
@@ -300,7 +305,6 @@ int route_dt_irq_to_guest(struct domain *d, const struct dt_irq *irq,
struct irq_desc *desc = irq_to_desc(irq->irq);
unsigned long flags;
int retval = 0;
- bool_t level;
action = xmalloc(struct irqaction);
if (!action)
@@ -341,10 +345,9 @@ int route_dt_irq_to_guest(struct domain *d, const struct dt_irq *irq,
goto out;
}
- level = dt_irq_is_level_triggered(irq);
- gic_route_irq_to_guest(d, desc, level, cpumask_of(smp_processor_id()),
+ desc->arch.type = irq->type;
+ gic_route_irq_to_guest(d, desc, cpumask_of(smp_processor_id()),
GIC_PRI_IRQ);
-
out:
spin_unlock_irqrestore(&desc->lock, flags);
return retval;
@@ -379,6 +382,67 @@ void pirq_set_affinity(struct domain *d, int pirq, const cpumask_t *mask)
BUG();
}
+static inline int irq_set_type(struct irq_desc *desc, unsigned int type)
+{
+ unsigned long flags;
+ int ret = -EBUSY;
+
+ if ( type == DT_IRQ_TYPE_NONE )
+ return 0;
+
+ spin_lock_irqsave(&desc->lock, flags);
+
+ if ( desc->arch.type != DT_IRQ_TYPE_NONE && desc->arch.type != type )
+ goto err;
+
+ desc->arch.type = type;
+
+ ret = 0;
+
+err:
+ spin_unlock_irqrestore(&desc->lock, flags);
+ return ret;
+}
+
+unsigned int platform_get_irq(const struct dt_device_node *device,
+ int index)
+{
+ struct dt_irq dt_irq;
+ struct irq_desc *desc;
+ unsigned int type, irq;
+ int res;
+
+ res = dt_device_get_irq(device, index, &dt_irq);
+ if ( res )
+ return 0;
+
+ irq = dt_irq.irq;
+ type = dt_irq.type;
+
+ /* Set up the IRQ type */
+
+ if ( irq < NR_LOCAL_IRQS )
+ {
+ unsigned int cpu;
+ /* For PPIs, we need to set the IRQ type on every online CPU */
+ for_each_cpu( cpu, &cpu_online_map )
+ {
+ desc = &per_cpu(local_irq_desc, cpu)[irq];
+ res = irq_set_type(desc, type);
+ if ( res )
+ return 0;
+ }
+ }
+ else
+ {
+ res = irq_set_type(irq_to_desc(irq), type);
+ if ( res )
+ return 0;
+ }
+
+ return irq;
+}
+
/*
* Local variables:
* mode: C
@@ -687,6 +687,8 @@ void __init start_xen(unsigned long boot_phys_offset,
dt_unflatten_host_device_tree();
dt_irq_xlate = gic_irq_xlate;
+ init_IRQ();
+
dt_uart_init();
console_init_preirq();
@@ -716,7 +718,6 @@ void __init start_xen(unsigned long boot_phys_offset,
tasklet_subsys_init();
- init_IRQ();
xsm_dt_init();
@@ -171,11 +171,10 @@ extern void vgic_clear_pending_irqs(struct vcpu *v);
extern struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq);
/* Program the GIC to route an interrupt */
-extern void gic_route_irq_to_xen(struct irq_desc *desc, bool_t level,
- const cpumask_t *cpu_mask,
+extern void gic_route_irq_to_xen(struct irq_desc *desc, const cpumask_t *cpu_mask,
unsigned int priority);
extern void gic_route_irq_to_guest(struct domain *, struct irq_desc *desc,
- bool_t level, const cpumask_t *cpu_mask,
+ const cpumask_t *cpu_mask,
unsigned int priority);
extern void gic_inject(void);
@@ -16,6 +16,7 @@ struct arch_pirq
struct arch_irq_desc {
int eoi_cpu;
+ unsigned int type;
};
#define NR_LOCAL_IRQS 32
@@ -46,6 +47,8 @@ int setup_dt_irq(const struct dt_irq *irq, struct irqaction *new);
int route_dt_irq_to_guest(struct domain *d, const struct dt_irq *irq,
const char *devname);
+unsigned int platform_get_irq(const struct dt_device_node *device,
+ int index);
#endif /* _ASM_HW_IRQ_H */
/*
For now, ARM uses different IRQ functions to set up an interrupt handler. This
is a bit annoying for common drivers because we have to add #ifdefery when an
IRQ is set up (see ns16550_init_postirq for an example). To avoid completely
forking the IRQ management code, we can introduce a field to store the IRQ
type (e.g. level/edge ...).

This patch also adds platform_get_irq, which retrieves the IRQ from the device
tree and sets up the IRQ type correctly.

In order to use this solution, we have to move init_IRQ earlier for the boot
CPU. It's fine because the code only depends on percpu.

Signed-off-by: Julien Grall <julien.grall@linaro.org>

---
Changes in v3:
    - irqflags is unsigned long not unsigned int
    - fix comment
    - don't need to set IRQ type when NONE is used (already set at
      startup).

Changes in v2:
    - Patch added
---
 xen/arch/arm/gic.c        | 23 +++++++------
 xen/arch/arm/irq.c        | 80 ++++++++++++++++++++++++++++++++++++++++-----
 xen/arch/arm/setup.c      |  3 +-
 xen/include/asm-arm/gic.h |  5 ++-
 xen/include/asm-arm/irq.h |  3 ++
 5 files changed, 92 insertions(+), 22 deletions(-)
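
As a usage illustration (not part of the patch), here is a minimal sketch of
how a driver's probe code might call platform_get_irq(). The example_probe()
function, its printk messages and the way the device node is obtained are
assumptions made for the sake of the example; only platform_get_irq() and its
return-0-on-failure convention come from the patch above.

#include <xen/device_tree.h>
#include <xen/errno.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <asm/irq.h>

/* Hypothetical probe function: "node" is assumed to have been found
 * elsewhere (e.g. via dt_find_compatible_node()). */
static int __init example_probe(const struct dt_device_node *node)
{
    unsigned int irq;

    /* platform_get_irq() parses the index-th interrupt specifier of the
     * node and records the trigger type (level/edge) in the IRQ
     * descriptor, so the caller no longer needs to carry a separate
     * "level" flag around. It returns 0 when no usable IRQ is found. */
    irq = platform_get_irq(node, 0);
    if ( irq == 0 )
    {
        printk("example: no usable interrupt found\n");
        return -ENODEV;
    }

    printk("example: using IRQ %u\n", irq);

    /* The IRQ number can now be handed to the usual setup path, with no
     * need to pass level/edge information explicitly. */
    return 0;
}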