@@ -521,6 +521,9 @@ static void __iommu_dma_unmap_swiotlb(st
 	if (WARN_ON(!phys))
 		return;
 
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
+		arch_sync_dma_for_cpu(phys, size, dir);
+
 	__iommu_dma_unmap(dev, dma_addr, size);
 
 	if (unlikely(is_swiotlb_buffer(dev, phys)))
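
For orientation, this is roughly how __iommu_dma_unmap_swiotlb() reads with the hunk applied. Only the guarded arch_sync_dma_for_cpu() call is taken from the patch; the surrounding lines (domain lookup, iova-to-phys translation, swiotlb teardown) are reconstructed from context and may not match the tree exactly. The ordering is the point: the CPU sync now lives in the common unmap helper, gated on DMA_ATTR_SKIP_CPU_SYNC and skipped for coherent devices, and runs while the physical address is still in hand, before the IOVA mapping is torn down and any bounce-buffer slot is released.

static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	phys_addr_t phys;

	phys = iommu_iova_to_phys(domain, dma_addr);
	if (WARN_ON(!phys))
		return;

	/* New: sync for the CPU here, instead of in each caller. */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	__iommu_dma_unmap(dev, dma_addr, size);

	if (unlikely(is_swiotlb_buffer(dev, phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}
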
@@ -871,8 +874,6 @@ static dma_addr_t iommu_dma_map_page(str
 static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
 	__iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
 }
 
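
With the explicit sync dropped, the CPU maintenance this wrapper used to request via iommu_dma_sync_single_for_cpu() is now done inside __iommu_dma_unmap_swiotlb() (see the first hunk), so iommu_dma_unmap_page() reduces to a plain delegation:

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* Sync, IOVA teardown and swiotlb cleanup all happen in the helper. */
	__iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
}
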
@@ -1089,14 +1090,14 @@ static void iommu_dma_unmap_sg(struct de
 	struct scatterlist *tmp;
 	int i;
 
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
-
 	if (dev_is_untrusted(dev)) {
 		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
 		return;
 	}
 
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+
 	/*
 	 * The scatterlist segments are mapped into a single
 	 * contiguous IOVA allocation, so this is incredibly easy.
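
The reorder in iommu_dma_unmap_sg() has the same intent: untrusted devices bail out into iommu_dma_unmap_sg_swiotlb(), which unmaps each segment through the helper touched in the first hunk and therefore gets its CPU sync there, so the bulk sync only needs to run on the non-swiotlb path after that early return. A sketch of how the top of the function reads afterwards; the signature and local declarations above the hunk are reconstructed from context, so treat them as assumptions:

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	/* Untrusted devices are bounce-buffered; their unmap syncs per segment. */
	if (dev_is_untrusted(dev)) {
		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
		return;
	}

	/* Direct path: one bulk sync before tearing down the IOVA range. */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/* Remainder unchanged: the segments share one contiguous IOVA
	 * allocation, which is computed and unmapped in a single call. */
}
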