[v3,4/8] drm/panfrost: Split panfrost_mmu_map SG list mapping to its own function

Message ID 20190802195150.23207-5-robh@kernel.org
State Superseded
Series drm/panfrost: Add heap and no execute buffer allocation

Commit Message

Rob Herring (Arm) Aug. 2, 2019, 7:51 p.m. UTC
In preparation for creating partial GPU mappings of BOs on page faults,
split out the SG list handling of panfrost_mmu_map().

Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Cc: Boris Brezillon <boris.brezillon@collabora.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Steven Price <steven.price@arm.com>
Acked-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Signed-off-by: Rob Herring <robh@kernel.org>
---
 drivers/gpu/drm/panfrost/panfrost_mmu.c | 52 +++++++++++++++----------
 1 file changed, 31 insertions(+), 21 deletions(-)
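
For context, here is a minimal sketch of how the split-out mmu_map_sg()
could be reused by a page-fault handler to map only part of a BO, which
is the stated motivation for the refactor. Everything below is an
assumption for illustration: panfrost_map_fault_region(),
get_fault_region_sgt(), and the 2MB granule are hypothetical and not
part of this patch.

/* Hypothetical sketch, not part of this patch: map only the region of
 * a heap BO around a faulting address. get_fault_region_sgt() is an
 * assumed helper returning an SG table covering just that region.
 */
static int panfrost_map_fault_region(struct panfrost_device *pfdev,
				     struct panfrost_gem_object *bo,
				     u64 fault_iova)
{
	/* Align down to a 2MB granule so large pages can still be used. */
	u64 region_start = fault_iova & ~((u64)SZ_2M - 1);
	struct sg_table *sgt;

	sgt = get_fault_region_sgt(bo, region_start);	/* assumed helper */
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* prot is now a parameter, so heap memory can be mapped
	 * read/write but non-executable.
	 */
	return mmu_map_sg(pfdev, region_start,
			  IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
}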

Comments

Steven Price Aug. 8, 2019, 12:57 p.m. UTC | #1
On 02/08/2019 20:51, Rob Herring wrote:
> In preparation for creating partial GPU mappings of BOs on page faults,
> split out the SG list handling of panfrost_mmu_map().
> 
> Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
> Cc: Boris Brezillon <boris.brezillon@collabora.com>
> Cc: Robin Murphy <robin.murphy@arm.com>
> Cc: Steven Price <steven.price@arm.com>
> Acked-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
> Signed-off-by: Rob Herring <robh@kernel.org>

Reviewed-by: Steven Price <steven.price@arm.com>

(Although I don't see a change from the previous posting, which already
had my R-b).


Patch

diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 92ac995dd9c6..b4ac149b2399 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -145,27 +145,13 @@ static size_t get_pgsize(u64 addr, size_t size)
 	return SZ_2M;
 }
 
-int panfrost_mmu_map(struct panfrost_gem_object *bo)
+static int mmu_map_sg(struct panfrost_device *pfdev, u64 iova,
+		      int prot, struct sg_table *sgt)
 {
-	struct drm_gem_object *obj = &bo->base.base;
-	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
-	struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
-	u64 iova = bo->node.start << PAGE_SHIFT;
 	unsigned int count;
 	struct scatterlist *sgl;
-	struct sg_table *sgt;
-	int ret;
-
-	if (WARN_ON(bo->is_mapped))
-		return 0;
-
-	sgt = drm_gem_shmem_get_pages_sgt(obj);
-	if (WARN_ON(IS_ERR(sgt)))
-		return PTR_ERR(sgt);
-
-	ret = pm_runtime_get_sync(pfdev->dev);
-	if (ret < 0)
-		return ret;
+	struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
+	u64 start_iova = iova;
 
 	mutex_lock(&pfdev->mmu->lock);
 
@@ -178,18 +164,42 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
 		while (len) {
 			size_t pgsize = get_pgsize(iova | paddr, len);
 
-			ops->map(ops, iova, paddr, pgsize, IOMMU_WRITE | IOMMU_READ);
+			ops->map(ops, iova, paddr, pgsize, prot);
 			iova += pgsize;
 			paddr += pgsize;
 			len -= pgsize;
 		}
 	}
 
-	mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
-			    bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
+	mmu_hw_do_operation(pfdev, 0, start_iova, iova - start_iova,
+			    AS_COMMAND_FLUSH_PT);
 
 	mutex_unlock(&pfdev->mmu->lock);
 
+	return 0;
+}
+
+int panfrost_mmu_map(struct panfrost_gem_object *bo)
+{
+	struct drm_gem_object *obj = &bo->base.base;
+	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
+	struct sg_table *sgt;
+	int ret;
+	int prot = IOMMU_READ | IOMMU_WRITE;
+
+	if (WARN_ON(bo->is_mapped))
+		return 0;
+
+	sgt = drm_gem_shmem_get_pages_sgt(obj);
+	if (WARN_ON(IS_ERR(sgt)))
+		return PTR_ERR(sgt);
+
+	ret = pm_runtime_get_sync(pfdev->dev);
+	if (ret < 0)
+		return ret;
+
+	mmu_map_sg(pfdev, bo->node.start << PAGE_SHIFT, prot, sgt);
+
 	pm_runtime_mark_last_busy(pfdev->dev);
 	pm_runtime_put_autosuspend(pfdev->dev);
 	bo->is_mapped = true;
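
A design note on the refactor: mmu_map_sg() takes prot as a parameter
instead of hard-coding IOMMU_WRITE | IOMMU_READ, so callers can vary
permissions per BO, and it flushes only the range actually mapped
(iova - start_iova) rather than the full BO size, which matters once
mappings can be partial. A hedged sketch of how the no-execute
allocation from this series' title could build on the prot parameter
(bo->noexec is an assumed flag, not shown in this patch):

	int prot = IOMMU_READ | IOMMU_WRITE;

	if (bo->noexec)		/* hypothetical flag */
		prot |= IOMMU_NOEXEC;

	mmu_map_sg(pfdev, bo->node.start << PAGE_SHIFT, prot, sgt);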