@@ -161,6 +161,7 @@ static void apic_update_vector(struct ir
apicd->move_in_progress = true;
apicd->prev_vector = apicd->vector;
apicd->prev_cpu = apicd->cpu;
+ WARN_ON_ONCE(apicd->cpu == newcpu);
} else {
irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
managed);
@@ -910,7 +911,7 @@ void send_cleanup_vector(struct irq_cfg
__send_cleanup_vector(apicd);
}
-static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
+void irq_complete_move(struct irq_cfg *cfg)
{
struct apic_chip_data *apicd;
@@ -918,15 +919,16 @@ static void __irq_complete_move(struct i
if (likely(!apicd->move_in_progress))
return;
- if (vector == apicd->vector && apicd->cpu == smp_processor_id())
+ /*
+ * If the interrupt arrived on the new target CPU, clean up the
+ * vector on the old target CPU. A vector check is not required
+ * because an interrupt can never move from one vector to another
+ * on the same CPU.
+ */
+ if (apicd->cpu == smp_processor_id())
__send_cleanup_vector(apicd);
}
-void irq_complete_move(struct irq_cfg *cfg)
-{
- __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
-}
-
/*
* Called from fixup_irqs() with @desc->lock held and interrupts disabled.
*/
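[ Not part of the patch: a minimal userspace model of the cleanup decision above, for readers following the new comment. All identifiers are hypothetical stand-ins for struct apic_chip_data, smp_processor_id() and __send_cleanup_vector(). While a move is pending, vector/cpu describe the new target and prev_vector/prev_cpu the old one; because an interrupt can never occupy two vectors on the same CPU, comparing only the CPU is enough to know that the old vector may be released. ]

#include <stdbool.h>
#include <stdio.h>

/* Model of the relevant apic_chip_data fields. */
struct chip_data {
	unsigned int vector, cpu;           /* current (new) target */
	unsigned int prev_vector, prev_cpu; /* old target, still to be freed */
	bool move_in_progress;
};

static unsigned int current_cpu;        /* stand-in for smp_processor_id() */

static void complete_move(struct chip_data *d)
{
	if (!d->move_in_progress)
		return;
	/*
	 * Only the CPU is compared: a given interrupt owns at most one
	 * vector per CPU, so arriving on the new CPU implies it arrived
	 * on the new vector as well.
	 */
	if (d->cpu == current_cpu) {
		printf("free vector %u on old CPU %u\n",
		       d->prev_vector, d->prev_cpu);
		d->move_in_progress = false;
	}
}

int main(void)
{
	struct chip_data d = {
		.vector = 34, .cpu = 1,
		.prev_vector = 33, .prev_cpu = 0,
		.move_in_progress = true,
	};

	current_cpu = 0;   /* interrupt still taken on the old CPU: no cleanup */
	complete_move(&d);
	current_cpu = 1;   /* first interrupt on the new CPU: old vector freed */
	complete_move(&d);
	return 0;
}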