| Message ID | 20220718135636.338264-1-jean-philippe@linaro.org |
|---|---|
| State | Superseded |
| Series | [v2] hw/virtio/virtio-iommu: Enforce power-of-two notify for both MAP and UNMAP |
On 7/18/22 21:56, Jean-Philippe Brucker wrote:
> Currently we only enforce power-of-two mappings (required by the QEMU
> notifier) for UNMAP requests. A MAP request not aligned on a
> power-of-two may be successfully handled by VFIO, and then the
> corresponding UNMAP notify will fail because it will attempt to split
> that mapping. Ensure MAP and UNMAP notifications are consistent.
>
> Fixes: dde3f08b5cab ("virtio-iommu: Handle non power of 2 range invalidations")
> Reported-by: Tina Zhang <tina.zhang@intel.com>
> Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>

Tested-by: Tina Zhang <tina.zhang@intel.com>

Regards,
-Tina

> ---
> v2: correct translated_addr increment
> v1: https://lore.kernel.org/all/20220714095418.261387-1-jean-philippe@linaro.org/
>
> ---
> hw/virtio/virtio-iommu.c | 47 ++++++++++++++++++++++++----------------
> 1 file changed, 28 insertions(+), 19 deletions(-)
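The failure mode confirmed above can be illustrated with a short standalone program. This is hypothetical code, not QEMU or VFIO: the table-based backend_map()/backend_unmap() below only model a VFIO-type1-like backend that refuses to unmap part of a mapping it created.

```c
/*
 * Hypothetical model, not QEMU or VFIO code: a backend that, like VFIO
 * type1, only accepts an unmap matching a mapping it previously created.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 16

struct mapping { uint64_t iova, size; bool used; };
static struct mapping table[TABLE_SIZE];

static void backend_map(uint64_t iova, uint64_t size)
{
    for (int i = 0; i < TABLE_SIZE; i++) {
        if (!table[i].used) {
            table[i] = (struct mapping){ iova, size, true };
            return;
        }
    }
}

/* Fails when the requested range only covers part of an existing mapping */
static int backend_unmap(uint64_t iova, uint64_t size)
{
    for (int i = 0; i < TABLE_SIZE; i++) {
        if (table[i].used && table[i].iova == iova && table[i].size == size) {
            table[i].used = false;
            return 0;
        }
    }
    return -1;
}

int main(void)
{
    /* MAP notified as a single 12KB entry (not a power-of-two size) */
    backend_map(0x1000, 0x3000);

    /* UNMAP notified as power-of-two chunks: the first 4KB chunk fails */
    if (backend_unmap(0x1000, 0x1000) < 0) {
        printf("unmap failed: cannot split the 12KB mapping\n");
    }
    return 0;
}
```

Unmapping the whole 12KB range in one call would succeed in this model; only the power-of-two sub-range fails, which is why MAP and UNMAP notifications need to use the same granularity.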
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
index 281152d338..62e07ec2e4 100644
--- a/hw/virtio/virtio-iommu.c
+++ b/hw/virtio/virtio-iommu.c
@@ -197,6 +197,32 @@ static gint interval_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
     }
 }
 
+static void virtio_iommu_notify_map_unmap(IOMMUMemoryRegion *mr,
+                                          IOMMUTLBEvent *event,
+                                          hwaddr virt_start, hwaddr virt_end)
+{
+    uint64_t delta = virt_end - virt_start;
+
+    event->entry.iova = virt_start;
+    event->entry.addr_mask = delta;
+
+    if (delta == UINT64_MAX) {
+        memory_region_notify_iommu(mr, 0, *event);
+    }
+
+    while (virt_start != virt_end + 1) {
+        uint64_t mask = dma_aligned_pow2_mask(virt_start, virt_end, 64);
+
+        event->entry.addr_mask = mask;
+        event->entry.iova = virt_start;
+        memory_region_notify_iommu(mr, 0, *event);
+        virt_start += mask + 1;
+        if (event->entry.perm != IOMMU_NONE) {
+            event->entry.translated_addr += mask + 1;
+        }
+    }
+}
+
 static void virtio_iommu_notify_map(IOMMUMemoryRegion *mr, hwaddr virt_start,
                                     hwaddr virt_end, hwaddr paddr,
                                     uint32_t flags)
@@ -215,19 +241,16 @@ static void virtio_iommu_notify_map(IOMMUMemoryRegion *mr, hwaddr virt_start,
 
     event.type = IOMMU_NOTIFIER_MAP;
     event.entry.target_as = &address_space_memory;
-    event.entry.addr_mask = virt_end - virt_start;
-    event.entry.iova = virt_start;
     event.entry.perm = perm;
     event.entry.translated_addr = paddr;
 
-    memory_region_notify_iommu(mr, 0, event);
+    virtio_iommu_notify_map_unmap(mr, &event, virt_start, virt_end);
 }
 
 static void virtio_iommu_notify_unmap(IOMMUMemoryRegion *mr, hwaddr virt_start,
                                       hwaddr virt_end)
 {
     IOMMUTLBEvent event;
-    uint64_t delta = virt_end - virt_start;
 
     if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_UNMAP)) {
         return;
@@ -239,22 +262,8 @@ static void virtio_iommu_notify_unmap(IOMMUMemoryRegion *mr, hwaddr virt_start,
     event.entry.target_as = &address_space_memory;
     event.entry.perm = IOMMU_NONE;
     event.entry.translated_addr = 0;
-    event.entry.addr_mask = delta;
-    event.entry.iova = virt_start;
-
-    if (delta == UINT64_MAX) {
-        memory_region_notify_iommu(mr, 0, event);
-    }
-
-
-    while (virt_start != virt_end + 1) {
-        uint64_t mask = dma_aligned_pow2_mask(virt_start, virt_end, 64);
-
-        event.entry.addr_mask = mask;
-        event.entry.iova = virt_start;
-        memory_region_notify_iommu(mr, 0, event);
-        virt_start += mask + 1;
-    }
+    virtio_iommu_notify_map_unmap(mr, &event, virt_start, virt_end);
 }
 
 static gboolean virtio_iommu_notify_unmap_cb(gpointer key, gpointer value,
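To see how the new virtio_iommu_notify_map_unmap() helper carves up a range whose size is not a power of two, here is a standalone sketch. aligned_pow2_mask() is a simplified stand-in for QEMU's dma_aligned_pow2_mask(), written here only for illustration, and the whole-address-space case (delta == UINT64_MAX) that the patch handles up front is skipped.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Simplified stand-in for QEMU's dma_aligned_pow2_mask(): the largest
 * power-of-two mask that keeps virt_start aligned and stays inside
 * [virt_start, virt_end].  Uses the GCC/Clang __builtin_clzll builtin.
 */
static uint64_t aligned_pow2_mask(uint64_t start, uint64_t end)
{
    uint64_t size_mask = end - start;
    uint64_t align_mask = start ? (start & -start) - 1 : UINT64_MAX;

    if (align_mask <= size_mask) {
        return align_mask;
    }
    /* Largest power-of-two length that fits in the remaining range */
    return (1ULL << (63 - __builtin_clzll(size_mask + 1))) - 1;
}

int main(void)
{
    /* 12KB range starting at 0x1000: the size is not a power of two */
    uint64_t virt_start = 0x1000, virt_end = 0x3fff;

    while (virt_start != virt_end + 1) {
        uint64_t mask = aligned_pow2_mask(virt_start, virt_end);

        printf("notify [0x%" PRIx64 ", 0x%" PRIx64 "]\n",
               virt_start, virt_start + mask);
        virt_start += mask + 1;
    }
    return 0;
}
```

For this range the sketch prints two notifications, [0x1000, 0x1fff] and [0x2000, 0x3fff], i.e. a 4KB chunk followed by an 8KB chunk, each sized and aligned to a power of two.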
Currently we only enforce power-of-two mappings (required by the QEMU
notifier) for UNMAP requests. A MAP request not aligned on a
power-of-two may be successfully handled by VFIO, and then the
corresponding UNMAP notify will fail because it will attempt to split
that mapping. Ensure MAP and UNMAP notifications are consistent.

Fixes: dde3f08b5cab ("virtio-iommu: Handle non power of 2 range invalidations")
Reported-by: Tina Zhang <tina.zhang@intel.com>
Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
v2: correct translated_addr increment
v1: https://lore.kernel.org/all/20220714095418.261387-1-jean-philippe@linaro.org/

---
hw/virtio/virtio-iommu.c | 47 ++++++++++++++++++++++++----------------
1 file changed, 28 insertions(+), 19 deletions(-)
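The v2 note about the translated_addr increment matters because, once a MAP is notified in chunks, each chunk's guest-physical target has to advance together with the IOVA, which is what the helper does when perm != IOMMU_NONE. A minimal sketch of that bookkeeping with hypothetical addresses (not QEMU code):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical MAP: IOVA [0x1000, 0x3fff] backed by physical 0x80000000 */
    uint64_t iova = 0x1000, paddr = 0x80000000;
    /* Chunk masks produced by the power-of-two split: 4KB, then 8KB */
    uint64_t masks[] = { 0xfff, 0x1fff };

    for (int i = 0; i < 2; i++) {
        printf("MAP iova=0x%" PRIx64 " -> paddr=0x%" PRIx64 " size=0x%" PRIx64 "\n",
               iova, paddr, masks[i] + 1);
        iova  += masks[i] + 1;
        paddr += masks[i] + 1;   /* the v2 fix: translated_addr advances too */
    }
    return 0;
}
```

This prints iova 0x1000 -> paddr 0x80000000 for the first 4KB chunk and iova 0x2000 -> paddr 0x80001000 for the second, so each chunk keeps the correct offset within the original mapping.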