diff --git a/drivers/media/pci/intel/ipu6/ipu6-dma.c b/drivers/media/pci/intel/ipu6/ipu6-dma.c
--- a/drivers/media/pci/intel/ipu6/ipu6-dma.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-dma.c
@@ -428,11 +428,12 @@ static int ipu6_dma_map_sg(struct device *dev, struct scatterlist *sglist,
iova_addr = iova->pfn_lo;
for_each_sg(sglist, sg, count, i) {
+ phys_addr_t iova_pa;
int ret;
- dev_dbg(dev, "mapping entry %d: iova 0x%llx phy %pad size %d\n",
- i, PFN_PHYS(iova_addr), &sg_dma_address(sg),
- sg_dma_len(sg));
+ iova_pa = PFN_PHYS(iova_addr);
+ dev_dbg(dev, "mapping entry %d: iova %pap phy %pap size %d\n",
+ i, &iova_pa, &sg_dma_address(sg), sg_dma_len(sg));
ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),
sg_dma_address(sg),
diff --git a/drivers/media/pci/intel/ipu6/ipu6-mmu.c b/drivers/media/pci/intel/ipu6/ipu6-mmu.c
--- a/drivers/media/pci/intel/ipu6/ipu6-mmu.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-mmu.c
@@ -97,13 +97,15 @@ static void page_table_dump(struct ipu6_mmu_info *mmu_info)
for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
u32 l2_idx;
u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT;
+ phys_addr_t l2_phys;
if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval)
continue;
+
+ l2_phys = TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]);
dev_dbg(mmu_info->dev,
- "l1 entry %u; iovas 0x%8.8x-0x%8.8x, at %pa\n",
- l1_idx, iova, iova + ISP_PAGE_SIZE,
- TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]));
+ "l1 entry %u; iovas 0x%8.8x-0x%8.8x, at %pap\n",
+ l1_idx, iova, iova + ISP_PAGE_SIZE, &l2_phys);
for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) {
u32 *l2_pt = mmu_info->l2_pts[l1_idx];
@@ -227,7 +229,7 @@ static u32 *alloc_l1_pt(struct ipu6_mmu_info *mmu_info)
}
mmu_info->l1_pt_dma = dma >> ISP_PADDR_SHIFT;
- dev_dbg(mmu_info->dev, "l1 pt %p mapped at %llx\n", pt, dma);
+ dev_dbg(mmu_info->dev, "l1 pt %p mapped at %pad\n", pt, &dma);
return pt;
@@ -330,8 +332,8 @@ static int __ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE);
dev_dbg(mmu_info->dev,
- "mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr 0x%10.10llx\n",
- iova_start, iova_end, size, paddr);
+ "mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr %pap\n",
+ iova_start, iova_end, size, &paddr);
return l2_map(mmu_info, iova_start, paddr, size);
}
@@ -363,8 +365,8 @@ static size_t l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
< iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
l2_pt = mmu_info->l2_pts[l1_idx];
dev_dbg(mmu_info->dev,
- "unmap l2 index %u with pteval 0x%10.10llx\n",
- l2_idx, TBL_PHYS_ADDR(l2_pt[l2_idx]));
+ "unmap l2 index %u with pteval 0x%p\n",
+ l2_idx, &l2_pt[l2_idx]);
l2_pt[l2_idx] = mmu_info->dummy_page_pteval;
clflush_cache_range((void *)&l2_pt[l2_idx],
@@ -525,9 +527,10 @@ static struct ipu6_mmu_info *ipu6_mmu_alloc(struct ipu6_device *isp)
return NULL;
mmu_info->aperture_start = 0;
- mmu_info->aperture_end = DMA_BIT_MASK(isp->secure_mode ?
- IPU6_MMU_ADDR_BITS :
- IPU6_MMU_ADDR_BITS_NON_SECURE);
+ mmu_info->aperture_end =
+ (dma_addr_t)DMA_BIT_MASK(isp->secure_mode ?
+ IPU6_MMU_ADDR_BITS :
+ IPU6_MMU_ADDR_BITS_NON_SECURE);
mmu_info->pgsize_bitmap = SZ_4K;
mmu_info->dev = &isp->pdev->dev;
Fix DMA and physical address printing on 32-bit platforms by using the
correct types. Also cast the DMA_BIT_MASK() result to dma_addr_t to make
Clang happy.

Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
---
 drivers/media/pci/intel/ipu6/ipu6-dma.c |  7 ++++---
 drivers/media/pci/intel/ipu6/ipu6-mmu.c | 25 ++++++++++++++-----------
 2 files changed, 18 insertions(+), 14 deletions(-)
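
For reviewers less familiar with the %pa family of printk specifiers, below is a
minimal sketch of the conventions the fix relies on (illustration only, not part
of the patch; the helper name, device and values are made up). %pap dereferences
a pointer to a phys_addr_t and %pad a pointer to a dma_addr_t, so the full value
is printed on both 32-bit and 64-bit kernels without casts, and storing
DMA_BIT_MASK() into a dma_addr_t takes an explicit cast as in the
ipu6_mmu_alloc() hunk above.

/*
 * Illustration only, not part of the patch: a hypothetical helper that
 * shows the %pap/%pad usage and the DMA_BIT_MASK() cast applied above.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>

static void example_print_addrs(struct device *dev, phys_addr_t pa,
				dma_addr_t da)
{
	/* DMA_BIT_MASK() evaluates to a u64; the explicit cast avoids a
	 * Clang warning when dma_addr_t is only 32 bits wide. */
	dma_addr_t aperture_end = (dma_addr_t)DMA_BIT_MASK(32);

	/* %pap and %pad take pointers to the typed addresses, so the same
	 * format string is correct on 32-bit and 64-bit configurations. */
	dev_dbg(dev, "phys %pap dma %pad end %pad\n", &pa, &da, &aperture_end);
}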