--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1687,14 +1687,32 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
struct device *dev = msi_desc_to_dev(desc);
const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
const struct iommu_dma_msi_page *msi_page;
+	struct iommu_dma_cookie *cookie;
+	phys_addr_t msi_addr;
 
-	msi_page = msi_desc_get_iommu_cookie(desc);
-
-	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
+	if (!domain || !domain->iova_cookie)
 		return;
 
+	cookie = domain->iova_cookie;
+	msi_addr = ((u64)msg->address_hi << 32) | msg->address_lo;
+	msi_addr &= ~(phys_addr_t)(cookie_msi_granule(cookie) - 1);
+
+	msi_page = msi_desc_get_iommu_cookie(desc);
+	if (!msi_page || msi_page->phys != msi_addr) {
+		msi_desc_set_iommu_cookie(desc, NULL);
+		list_for_each_entry(msi_page, &cookie->msi_page_list, list) {
+			if (msi_page->phys == msi_addr) {
+				msi_desc_set_iommu_cookie(desc, msi_page);
+				break;
+			}
+		}
+		msi_page = msi_desc_get_iommu_cookie(desc);
+	}
+
+	if (WARN_ON(!msi_page))
+		return;
 
msg->address_hi = upper_32_bits(msi_page->iova);
- msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
+ msg->address_lo &= cookie_msi_granule(cookie) - 1;
msg->address_lo += lower_32_bits(msi_page->iova);
}
--- a/drivers/irqchip/irq-riscv-imsic.c
+++ b/drivers/irqchip/irq-riscv-imsic.c
@@ -493,11 +493,18 @@ static int imsic_irq_domain_alloc(struct irq_domain *domain,
int i, hwirq, err = 0;
	unsigned int cpu;
	phys_addr_t msi_addr;
- err = imsic_get_cpu(&imsic->lmask, false, &cpu);
- if (err)
- return err;
+	/* Map MSI address of all CPUs */
+	for_each_cpu(cpu, &imsic->lmask) {
+		err = imsic_cpu_page_phys(cpu, 0, &msi_addr);
+		if (err)
+			return err;
+
+		err = iommu_dma_prepare_msi(info->desc, msi_addr);
+		if (err)
+			return err;
+	}
- err = imsic_cpu_page_phys(cpu, 0, &msi_addr);
+ err = imsic_get_cpu(&imsic->lmask, false, &cpu);
if (err)
return err;
@@ -505,10 +512,6 @@ static int imsic_irq_domain_alloc(struct irq_domain *domain,
if (hwirq < 0)
return hwirq;
- err = iommu_dma_prepare_msi(info->desc, msi_addr);
- if (err)
- goto fail;
-
	for (i = 0; i < nr_irqs; i++) {
		imsic_id_set_target(hwirq + i, cpu);
		irq_domain_set_info(domain, virq + i, hwirq + i,
@@ -528,10 +531,6 @@ static int imsic_irq_domain_alloc(struct irq_domain *domain,
}
return 0;
-
-fail:
- imsic_ids_free(hwirq, get_count_order(nr_irqs));
- return err;
}
static void imsic_irq_domain_free(struct irq_domain *domain,
We have a separate RISC-V IMSIC MSI address for each CPU, so changing
MSI (or IRQ) affinity results in re-programming of the MSI address in
the PCIe (or platform) device. Currently, iommu_dma_prepare_msi() is
called only once at the time of IRQ allocation, so the IOMMU DMA domain
will have a mapping for only one MSI page. This means that
iommu_dma_compose_msi_msg(), called by imsic_irq_compose_msi_msg(),
will always use the same MSI page irrespective of the target CPU MSI
address. In other words, changing MSI (or IRQ) affinity for a device
behind an IOMMU DMA domain will not work.

To address the above issue, we do the following:

1) Map MSI pages for all CPUs in imsic_irq_domain_alloc() using
   iommu_dma_prepare_msi().
2) Extend iommu_dma_compose_msi_msg() to look up the correct msi_page
   whenever the msi_page stored as the IOMMU cookie does not match
   the target MSI address.

Reported-by: Vincent Chen <vincent.chen@sifive.com>
Signed-off-by: Anup Patel <apatel@ventanamicro.com>
---
 drivers/iommu/dma-iommu.c         | 24 +++++++++++++++++++++---
 drivers/irqchip/irq-riscv-imsic.c | 23 +++++++++++------------
 2 files changed, 32 insertions(+), 15 deletions(-)
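For reviewers without the full IMSIC context, here is a minimal
standalone C model of the two steps above. It is an illustration only:
MSI_GRANULE, msi_pages, desc_cookie, and compose_msi_msg() are
hypothetical stand-ins, not kernel APIs; the actual patch walks
cookie->msi_page_list and caches the match with
msi_desc_set_iommu_cookie().

/*
 * Hypothetical standalone model of the per-CPU MSI page lookup.
 * Not kernel code; names and values are invented for illustration.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MSI_GRANULE	0x1000u	/* assumed IOMMU MSI granule (one page) */
#define NR_CPUS		4

/* Models struct iommu_dma_msi_page: one IOVA mapping per MSI target */
struct msi_page {
	uint64_t phys;	/* per-CPU IMSIC MSI physical address */
	uint64_t iova;	/* IOVA the IOMMU maps to that physical page */
};

/* Step 1 equivalent: one page pre-mapped per CPU at alloc time */
static struct msi_page msi_pages[NR_CPUS];

/* Models the msi_desc IOMMU cookie: cached last-matched page */
static struct msi_page *desc_cookie;

/*
 * Step 2 equivalent: given the MSI address the irqchip programmed
 * into the message, find the matching pre-mapped page and rewrite
 * the address with its IOVA.
 */
static int compose_msi_msg(uint64_t *addr)
{
	uint64_t msi_addr = *addr & ~(uint64_t)(MSI_GRANULE - 1);
	int i;

	if (!desc_cookie || desc_cookie->phys != msi_addr) {
		desc_cookie = NULL;
		for (i = 0; i < NR_CPUS; i++) {
			if (msi_pages[i].phys == msi_addr) {
				desc_cookie = &msi_pages[i];
				break;
			}
		}
	}
	if (!desc_cookie)
		return -1;

	*addr = desc_cookie->iova + (*addr & (MSI_GRANULE - 1));
	return 0;
}

int main(void)
{
	uint64_t addr;
	int cpu;

	/* Pretend each CPU's IMSIC file sits at a distinct physical page */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		msi_pages[cpu].phys = 0x28000000ull + cpu * MSI_GRANULE;
		msi_pages[cpu].iova = 0x8000000ull + cpu * MSI_GRANULE;
	}

	/* Affinity moves the MSI target from CPU 0 to CPU 2: both resolve */
	for (cpu = 0; cpu < NR_CPUS; cpu += 2) {
		addr = msi_pages[cpu].phys;
		assert(compose_msi_msg(&addr) == 0);
		printf("cpu%d: phys resolved to iova 0x%llx\n",
		       cpu, (unsigned long long)addr);
	}
	return 0;
}

The cached cookie keeps the common case cheap: the list walk only
happens when an affinity change has actually moved the MSI target to
another CPU's page.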