dma-buf: system_heap: No separate allocation for attachment sg_tables

Message ID: 20250417180943.1559755-1-tjmercier@google.com
State: New
Series: dma-buf: system_heap: No separate allocation for attachment sg_tables

Commit Message

T.J. Mercier April 17, 2025, 6:09 p.m. UTC
struct dma_heap_attachment is a separate allocation from the struct
sg_table it contains, but there is no reason for this. Let's use the
slab allocator just once instead of twice for dma_heap_attachment.

Signed-off-by: T.J. Mercier <tjmercier@google.com>
---
 drivers/dma-buf/heaps/system_heap.c | 43 ++++++++++++-----------------
 1 file changed, 17 insertions(+), 26 deletions(-)


base-commit: 8ffd015db85fea3e15a77027fda6c02ced4d2444
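
At a glance, the change turns the table member from a separately
allocated pointer into an embedded struct (simplified before/after
sketch; the full diff follows below):

/* Before: the attachment and its sg_table are two slab allocations. */
struct dma_heap_attachment {
	struct device *dev;
	struct sg_table *table;		/* kzalloc'd in dup_sg_table() */
	struct list_head list;
	bool mapped;
};

/* After: the table is embedded, so one kzalloc() covers both. */
struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;
};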

Comments

Christian König April 22, 2025, 8:24 a.m. UTC | #1
Am 17.04.25 um 20:09 schrieb T.J. Mercier:
> struct dma_heap_attachment is a separate allocation from the struct
> sg_table it contains, but there is no reason for this. Let's use the
> slab allocator just once instead of twice for dma_heap_attachment.
>
> Signed-off-by: T.J. Mercier <tjmercier@google.com>

I'm not *that* much of an expert on this code, but it looks totally reasonable to me.

Reviewed-by: Christian König <christian.koenig@amd.com>

Let me know if I should push that to drm-misc-next.

Regards,
Christian.

T.J. Mercier April 22, 2025, 4:17 p.m. UTC | #2
On Tue, Apr 22, 2025 at 1:24 AM Christian König
<christian.koenig@amd.com> wrote:
>
> Am 17.04.25 um 20:09 schrieb T.J. Mercier:
> > struct dma_heap_attachment is a separate allocation from the struct
> > sg_table it contains, but there is no reason for this. Let's use the
> > slab allocator just once instead of twice for dma_heap_attachment.
> >
> > Signed-off-by: T.J. Mercier <tjmercier@google.com>
>
> I'm not *that* much of an expert on this code, but it looks totally reasonable to me.

I noticed this while reviewing Maxime Ripard's recent carveout heap
patches, where I was confused about sg_free_table() until I realized
it doesn't free the struct sg_table itself, only the scatterlist it
owns. Then I started looking at other heaps and found that most of
them already embed the table this way (including the cma heap), and
figured it was a nice cleanup here.
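
A minimal sketch of the embedded-table lifetime (illustrative only;
struct owner and the helpers are made-up names, not the heap code):

#include <linux/scatterlist.h>
#include <linux/slab.h>

struct owner {
	struct sg_table table;	/* embedded, not a pointer */
};

static struct owner *owner_create(unsigned int nents)
{
	struct owner *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (!o)
		return NULL;
	/* Allocates the scatterlist entries, not the sg_table struct. */
	if (sg_alloc_table(&o->table, nents, GFP_KERNEL)) {
		kfree(o);
		return NULL;
	}
	return o;
}

static void owner_destroy(struct owner *o)
{
	sg_free_table(&o->table);	/* frees only the scatterlist */
	kfree(o);			/* one kfree() releases the table too */
}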

> Reviewed-by: Christian König <christian.koenig@amd.com>
>
> Let me know if I should push that to drm-misc-next.
>
> Regards,
> Christian.

Thanks, yes please!

Patch

diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 26d5dc89ea16..bee10c400cf0 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -35,7 +35,7 @@ struct system_heap_buffer {
 
 struct dma_heap_attachment {
 	struct device *dev;
-	struct sg_table *table;
+	struct sg_table table;
 	struct list_head list;
 	bool mapped;
 };
@@ -54,29 +54,22 @@ static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
 static const unsigned int orders[] = {8, 4, 0};
 #define NUM_ORDERS ARRAY_SIZE(orders)
 
-static struct sg_table *dup_sg_table(struct sg_table *table)
+static int dup_sg_table(struct sg_table *from, struct sg_table *to)
 {
-	struct sg_table *new_table;
-	int ret, i;
 	struct scatterlist *sg, *new_sg;
+	int ret, i;
 
-	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
-	if (!new_table)
-		return ERR_PTR(-ENOMEM);
-
-	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
-	if (ret) {
-		kfree(new_table);
-		return ERR_PTR(-ENOMEM);
-	}
+	ret = sg_alloc_table(to, from->orig_nents, GFP_KERNEL);
+	if (ret)
+		return ret;
 
-	new_sg = new_table->sgl;
-	for_each_sgtable_sg(table, sg, i) {
+	new_sg = to->sgl;
+	for_each_sgtable_sg(from, sg, i) {
 		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
 		new_sg = sg_next(new_sg);
 	}
 
-	return new_table;
+	return 0;
 }
 
 static int system_heap_attach(struct dma_buf *dmabuf,
@@ -84,19 +77,18 @@ static int system_heap_attach(struct dma_buf *dmabuf,
 {
 	struct system_heap_buffer *buffer = dmabuf->priv;
 	struct dma_heap_attachment *a;
-	struct sg_table *table;
+	int ret;
 
 	a = kzalloc(sizeof(*a), GFP_KERNEL);
 	if (!a)
 		return -ENOMEM;
 
-	table = dup_sg_table(&buffer->sg_table);
-	if (IS_ERR(table)) {
+	ret = dup_sg_table(&buffer->sg_table, &a->table);
+	if (ret) {
 		kfree(a);
-		return -ENOMEM;
+		return ret;
 	}
 
-	a->table = table;
 	a->dev = attachment->dev;
 	INIT_LIST_HEAD(&a->list);
 	a->mapped = false;
@@ -120,8 +112,7 @@ static void system_heap_detach(struct dma_buf *dmabuf,
 	list_del(&a->list);
 	mutex_unlock(&buffer->lock);
 
-	sg_free_table(a->table);
-	kfree(a->table);
+	sg_free_table(&a->table);
 	kfree(a);
 }
 
@@ -129,7 +120,7 @@ static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attac
 						enum dma_data_direction direction)
 {
 	struct dma_heap_attachment *a = attachment->priv;
-	struct sg_table *table = a->table;
+	struct sg_table *table = &a->table;
 	int ret;
 
 	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
@@ -164,7 +155,7 @@ static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	list_for_each_entry(a, &buffer->attachments, list) {
 		if (!a->mapped)
 			continue;
-		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
+		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
 	}
 	mutex_unlock(&buffer->lock);
 
@@ -185,7 +176,7 @@ static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 	list_for_each_entry(a, &buffer->attachments, list) {
 		if (!a->mapped)
 			continue;
-		dma_sync_sgtable_for_device(a->dev, a->table, direction);
+		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
 	}
 	mutex_unlock(&buffer->lock);