@@ -4614,6 +4614,19 @@ F: include/linux/*fence.h
F: Documentation/driver-api/dma-buf.rst
T: git git://anongit.freedesktop.org/drm/drm-misc
+DMA-BUF POOLS FRAMEWORK
+M: Laura Abbott <labbott@redhat.com>
+R: Liam Mark <lmark@codeaurora.org>
+R: Brian Starkey <Brian.Starkey@arm.com>
+R: "Andrew F. Davis" <afd@ti.com>
+R: John Stultz <john.stultz@linaro.org>
+S: Maintained
+L: linux-media@vger.kernel.org
+L: dri-devel@lists.freedesktop.org
+L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
+F: drivers/dma-buf/pools/*
+T: git git://anongit.freedesktop.org/drm/drm-misc
+
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
M: Vinod Koul <vkoul@kernel.org>
L: dmaengine@vger.kernel.org
@@ -39,4 +39,6 @@ config UDMABUF
A driver to let userspace turn memfd regions into dma-bufs.
Qemu can use this to create host dmabufs for guest framebuffers.
+source "drivers/dma-buf/pools/Kconfig"
+
endmenu
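
For anyone wanting to try this out (illustration, not part of the patch): since DMABUF_POOLS selects DMA_SHARED_BUFFER and GENERIC_ALLOCATOR itself, the config fragment is simply the one line below; actual pool providers are expected to be added separately.

    # enables the framework under drivers/dma-buf/pools/
    CONFIG_DMABUF_POOLS=y
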
@@ -1,4 +1,5 @@
obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
+obj-y += pools/
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
obj-$(CONFIG_UDMABUF) += udmabuf.o
new file mode 100644
@@ -0,0 +1,10 @@
+menuconfig DMABUF_POOLS
+ bool "DMA-BUF Userland Memory Pools"
+ depends on HAS_DMA && MMU
+ select GENERIC_ALLOCATOR
+ select DMA_SHARED_BUFFER
+ help
+ Choose this option to enable the DMA-BUF userland memory pools,
+ which allow userspace to allocate dma-bufs that can be shared
+ between drivers.
+ If you're not using Android it's probably safe to say N here.
new file mode 100644
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DMABUF_POOLS) += dmabuf-pools.o pool-ioctl.o pool-helpers.o
new file mode 100644
@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/dma-buf/pools/dmabuf-pools.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#include <linux/anon_inodes.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/freezer.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/rbtree.h>
+#include <linux/sched/task.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include "dmabuf-pools.h"
+
+#define DEVNAME "dmabuf_pools"
+
+#define NUM_POOL_MINORS 128
+static DEFINE_IDR(dmabuf_pool_idr);
+static DEFINE_MUTEX(minor_lock); /* Protect idr accesses */
+
+struct dmabuf_pool_device {
+ struct rw_semaphore lock;
+ struct plist_head pools;
+ struct dentry *debug_root;
+ dev_t device_devt;
+ struct class *pool_class;
+};
+
+static struct dmabuf_pool_device *internal_dev;
+static int pool_id;
+
+/* this function should only be called while dev->lock is held */
+static struct dmabuf_pool_buffer *dmabuf_pool_buffer_create(
+ struct dmabuf_pool *pool,
+ struct dmabuf_pool_device *dev,
+ unsigned long len,
+ unsigned long flags)
+{
+ struct dmabuf_pool_buffer *buffer;
+ int ret;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
+
+ buffer->pool = pool;
+ buffer->flags = flags;
+ buffer->size = len;
+
+ ret = pool->ops->allocate(pool, buffer, len, flags);
+
+ if (ret) {
+ if (!(pool->flags & DMABUF_POOL_FLAG_DEFER_FREE))
+ goto err2;
+
+ dmabuf_pool_freelist_drain(pool, 0);
+ ret = pool->ops->allocate(pool, buffer, len, flags);
+ if (ret)
+ goto err2;
+ }
+
+ if (!buffer->sg_table) {
+ WARN_ONCE(1, "This pool needs to set the sgtable");
+ ret = -EINVAL;
+ goto err1;
+ }
+
+ spin_lock(&pool->stat_lock);
+ pool->num_of_buffers++;
+ pool->num_of_alloc_bytes += len;
+ if (pool->num_of_alloc_bytes > pool->alloc_bytes_wm)
+ pool->alloc_bytes_wm = pool->num_of_alloc_bytes;
+ spin_unlock(&pool->stat_lock);
+
+ INIT_LIST_HEAD(&buffer->attachments);
+ mutex_init(&buffer->lock);
+ return buffer;
+
+err1:
+ pool->ops->free(buffer);
+err2:
+ kfree(buffer);
+ return ERR_PTR(ret);
+}
+
+void dmabuf_pool_buffer_destroy(struct dmabuf_pool_buffer *buffer)
+{
+ if (buffer->kmap_cnt > 0) {
+ pr_warn_once("%s: buffer still mapped in the kernel\n",
+ __func__);
+ buffer->pool->ops->unmap_kernel(buffer->pool, buffer);
+ }
+ buffer->pool->ops->free(buffer);
+ spin_lock(&buffer->pool->stat_lock);
+ buffer->pool->num_of_buffers--;
+ buffer->pool->num_of_alloc_bytes -= buffer->size;
+ spin_unlock(&buffer->pool->stat_lock);
+
+ kfree(buffer);
+}
+
+static void _dmabuf_pool_buffer_destroy(struct dmabuf_pool_buffer *buffer)
+{
+ struct dmabuf_pool *pool = buffer->pool;
+
+ if (pool->flags & DMABUF_POOL_FLAG_DEFER_FREE)
+ dmabuf_pool_freelist_add(pool, buffer);
+ else
+ dmabuf_pool_buffer_destroy(buffer);
+}
+
+static void *dmabuf_pool_buffer_kmap_get(struct dmabuf_pool_buffer *buffer)
+{
+ void *vaddr;
+
+ if (buffer->kmap_cnt) {
+ buffer->kmap_cnt++;
+ return buffer->vaddr;
+ }
+ vaddr = buffer->pool->ops->map_kernel(buffer->pool, buffer);
+ if (WARN_ONCE(!vaddr,
+ "pool->ops->map_kernel should return ERR_PTR on error"))
+ return ERR_PTR(-EINVAL);
+ if (IS_ERR(vaddr))
+ return vaddr;
+ buffer->vaddr = vaddr;
+ buffer->kmap_cnt++;
+ return vaddr;
+}
+
+static void dmabuf_pool_buffer_kmap_put(struct dmabuf_pool_buffer *buffer)
+{
+ buffer->kmap_cnt--;
+ if (!buffer->kmap_cnt) {
+ buffer->pool->ops->unmap_kernel(buffer->pool, buffer);
+ buffer->vaddr = NULL;
+ }
+}
+
+static struct sg_table *dup_sg_table(struct sg_table *table)
+{
+ struct sg_table *new_table;
+ int ret, i;
+ struct scatterlist *sg, *new_sg;
+
+ new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
+ if (!new_table)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
+ if (ret) {
+ kfree(new_table);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ new_sg = new_table->sgl;
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ memcpy(new_sg, sg, sizeof(*sg));
+ new_sg->dma_address = 0;
+ new_sg = sg_next(new_sg);
+ }
+
+ return new_table;
+}
+
+static void free_duped_table(struct sg_table *table)
+{
+ sg_free_table(table);
+ kfree(table);
+}
+
+struct dmabuf_pools_attachment {
+ struct device *dev;
+ struct sg_table *table;
+ struct list_head list;
+ enum dma_data_direction dir;
+};
+
+static int dmabuf_pool_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct dmabuf_pools_attachment *a;
+ struct sg_table *table;
+ struct dmabuf_pool_buffer *buffer = dmabuf->priv;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ table = dup_sg_table(buffer->sg_table);
+ if (IS_ERR(table)) {
+ kfree(a);
+ return -ENOMEM;
+ }
+
+ a->table = table;
+ a->dev = attachment->dev;
+ a->dir = DMA_NONE;
+ INIT_LIST_HEAD(&a->list);
+
+ attachment->priv = a;
+
+ mutex_lock(&buffer->lock);
+ list_add(&a->list, &buffer->attachments);
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static void dmabuf_pool_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct dmabuf_pools_attachment *a = attachment->priv;
+ struct dmabuf_pool_buffer *buffer = dmabuf->priv;
+ struct sg_table *table;
+
+ if (!a)
+ return;
+
+ table = a->table;
+ if (table) {
+ if (a->dir != DMA_NONE)
+ dma_unmap_sg(attachment->dev, table->sgl, table->nents,
+ a->dir);
+ free_duped_table(table);
+ }
+
+ mutex_lock(&buffer->lock);
+ list_del(&a->list);
+ mutex_unlock(&buffer->lock);
+
+ kfree(a);
+ attachment->priv = NULL;
+}
+
+static struct sg_table *dmabuf_pool_map_dma_buf(
+ struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct dmabuf_pools_attachment *a = attachment->priv;
+ struct sg_table *table;
+
+ if (WARN_ON(direction == DMA_NONE || !a))
+ return ERR_PTR(-EINVAL);
+
+ if (a->dir == direction)
+ return a->table;
+
+ if (WARN_ON(a->dir != DMA_NONE))
+ return ERR_PTR(-EBUSY);
+
+ table = a->table;
+ if (!IS_ERR(table)) {
+ if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
+ direction)) {
+ table = ERR_PTR(-ENOMEM);
+ } else {
+ a->dir = direction;
+ }
+ }
+ return table;
+}
+
+static void dmabuf_pool_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+}
+
+static int dmabuf_pool_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct dmabuf_pool_buffer *buffer = dmabuf->priv;
+ int ret = 0;
+
+ if (!buffer->pool->ops->map_user) {
+ pr_err("%s: this pool does not define a method for mapping to userspace\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (!(buffer->flags & DMABUF_POOL_FLAG_CACHED))
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ mutex_lock(&buffer->lock);
+ /* now map it to userspace */
+ ret = buffer->pool->ops->map_user(buffer->pool, buffer, vma);
+ mutex_unlock(&buffer->lock);
+
+ if (ret)
+ pr_err("%s: failure mapping buffer to userspace\n",
+ __func__);
+
+ return ret;
+}
+
+static void dmabuf_pool_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct dmabuf_pool_buffer *buffer = dmabuf->priv;
+
+ _dmabuf_pool_buffer_destroy(buffer);
+}
+
+static void *dmabuf_pool_dma_buf_kmap(struct dma_buf *dmabuf,
+ unsigned long offset)
+{
+ struct dmabuf_pool_buffer *buffer = dmabuf->priv;
+
+ return buffer->vaddr + offset * PAGE_SIZE;
+}
+
+static void dmabuf_pool_dma_buf_kunmap(struct dma_buf *dmabuf,
+ unsigned long offset,
+ void *ptr)
+{
+}
+
+static int dmabuf_pool_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct dmabuf_pool_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+ struct dmabuf_pools_attachment *a;
+ int ret = 0;
+
+ /*
+ * TODO: Move this elsewhere because we don't always need a vaddr
+ */
+ if (buffer->pool->ops->map_kernel) {
+ mutex_lock(&buffer->lock);
+ vaddr = dmabuf_pool_buffer_kmap_get(buffer);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto unlock;
+ }
+ mutex_unlock(&buffer->lock);
+ }
+
+ mutex_lock(&buffer->lock);
+ list_for_each_entry(a, &buffer->attachments, list) {
+ dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
+ direction);
+ }
+
+unlock:
+ mutex_unlock(&buffer->lock);
+ return ret;
+}
+
+static int dmabuf_pool_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct dmabuf_pool_buffer *buffer = dmabuf->priv;
+ struct dmabuf_pools_attachment *a;
+
+ if (buffer->pool->ops->map_kernel) {
+ mutex_lock(&buffer->lock);
+ dmabuf_pool_buffer_kmap_put(buffer);
+ mutex_unlock(&buffer->lock);
+ }
+
+ mutex_lock(&buffer->lock);
+ list_for_each_entry(a, &buffer->attachments, list) {
+ dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
+ direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static const struct dma_buf_ops dma_buf_ops = {
+ .map_dma_buf = dmabuf_pool_map_dma_buf,
+ .unmap_dma_buf = dmabuf_pool_unmap_dma_buf,
+ .mmap = dmabuf_pool_mmap,
+ .release = dmabuf_pool_dma_buf_release,
+ .attach = dmabuf_pool_attach,
+ .detach = dmabuf_pool_detach,
+ .begin_cpu_access = dmabuf_pool_dma_buf_begin_cpu_access,
+ .end_cpu_access = dmabuf_pool_dma_buf_end_cpu_access,
+ .map = dmabuf_pool_dma_buf_kmap,
+ .unmap = dmabuf_pool_dma_buf_kunmap,
+};
+
+int dmabuf_pool_alloc(struct dmabuf_pool *pool, size_t len, unsigned int flags)
+{
+ struct dmabuf_pool_device *dev = internal_dev;
+ struct dmabuf_pool_buffer *buffer = NULL;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ int fd;
+ struct dma_buf *dmabuf;
+
+ pr_debug("%s: pool: %s len %zu flags %x\n", __func__,
+ pool->name, len, flags);
+
+ len = PAGE_ALIGN(len);
+
+ if (!len)
+ return -EINVAL;
+
+ down_read(&dev->lock);
+ buffer = dmabuf_pool_buffer_create(pool, dev, len, flags);
+ up_read(&dev->lock);
+
+ if (!buffer)
+ return -ENODEV;
+
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+
+ exp_info.ops = &dma_buf_ops;
+ exp_info.size = buffer->size;
+ exp_info.flags = O_RDWR;
+ exp_info.priv = buffer;
+
+ dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dmabuf)) {
+ _dmabuf_pool_buffer_destroy(buffer);
+ return PTR_ERR(dmabuf);
+ }
+
+ fd = dma_buf_fd(dmabuf, O_CLOEXEC);
+ if (fd < 0)
+ dma_buf_put(dmabuf);
+
+ return fd;
+}
+
+static int dmabuf_pool_open(struct inode *inode, struct file *filp)
+{
+ struct dmabuf_pool *pool;
+
+ mutex_lock(&minor_lock);
+ pool = idr_find(&dmabuf_pool_idr, iminor(inode));
+ mutex_unlock(&minor_lock);
+ if (!pool) {
+ pr_debug("device: minor %d unknown.\n", iminor(inode));
+ return -ENODEV;
+ }
+
+ /* instance data as context */
+ filp->private_data = pool;
+ nonseekable_open(inode, filp);
+
+ return 0;
+}
+
+static int dmabuf_pool_release(struct inode *inode, struct file *filp)
+{
+ filp->private_data = NULL;
+
+ return 0;
+}
+
+
+static const struct file_operations dmabuf_pool_fops = {
+ .owner = THIS_MODULE,
+ .open = dmabuf_pool_open,
+ .release = dmabuf_pool_release,
+ .unlocked_ioctl = dmabuf_pool_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = dmabuf_pool_ioctl,
+#endif
+};
+
+static int debug_shrink_set(void *data, u64 val)
+{
+ struct dmabuf_pool *pool = data;
+ struct shrink_control sc;
+ int objs;
+
+ sc.gfp_mask = GFP_HIGHUSER;
+ sc.nr_to_scan = val;
+
+ if (!val) {
+ objs = pool->shrinker.count_objects(&pool->shrinker, &sc);
+ sc.nr_to_scan = objs;
+ }
+
+ pool->shrinker.scan_objects(&pool->shrinker, &sc);
+ return 0;
+}
+
+static int debug_shrink_get(void *data, u64 *val)
+{
+ struct dmabuf_pool *pool = data;
+ struct shrink_control sc;
+ int objs;
+
+ sc.gfp_mask = GFP_HIGHUSER;
+ sc.nr_to_scan = 0;
+
+ objs = pool->shrinker.count_objects(&pool->shrinker, &sc);
+ *val = objs;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
+ debug_shrink_set, "%llu\n");
+
+
+static int dmabuf_pool_get_minor(struct dmabuf_pool *pool)
+{
+ int retval = -ENOMEM;
+
+ mutex_lock(&minor_lock);
+ retval = idr_alloc(&dmabuf_pool_idr, pool, 0, NUM_POOL_MINORS,
+ GFP_KERNEL);
+ if (retval >= 0) {
+ pool->minor = retval;
+ retval = 0;
+ } else if (retval == -ENOSPC) {
+ printk("%s: Too many dmabuf-pools\n", __func__);
+ retval = -EINVAL;
+ }
+ mutex_unlock(&minor_lock);
+ return retval;
+}
+
+static void dmabuf_pool_free_minor(struct dmabuf_pool *pool)
+{
+ mutex_lock(&minor_lock);
+ idr_remove(&dmabuf_pool_idr, pool->minor);
+ mutex_unlock(&minor_lock);
+}
+
+
+void dmabuf_pool_add(struct dmabuf_pool *pool)
+{
+ struct dmabuf_pool_device *dev = internal_dev;
+ int ret;
+ struct device *dev_ret;
+ struct dentry *pool_root;
+ char debug_name[64];
+
+ if (!pool->ops->allocate || !pool->ops->free) {
+ pr_err("%s: cannot add pool with invalid ops struct.\n",
+ __func__);
+ return;
+ }
+
+ /* determine minor number */
+ ret = dmabuf_pool_get_minor(pool);
+ if (ret) {
+ pr_err("%s: get minor number failed\n", __func__);
+ return;
+ }
+
+ /* create device */
+ pool->pool_devt = MKDEV(MAJOR(dev->device_devt), pool->minor);
+ dev_ret = device_create(dev->pool_class,
+ NULL,
+ pool->pool_devt,
+ NULL,
+ pool->name);
+ if (IS_ERR(dev_ret)) {
+ pr_err("dmabuf-pools: failed to create char device.\n");
+ return;
+ }
+
+ cdev_init(&pool->pool_dev, &dmabuf_pool_fops);
+ ret = cdev_add(&pool->pool_dev, pool->pool_devt, 1);
+ if (ret < 0) {
+ device_destroy(dev->pool_class, pool->pool_devt);
+ pr_err("dmabuf-pools: failed to add char device.\n");
+ return;
+ }
+
+ spin_lock_init(&pool->free_lock);
+ spin_lock_init(&pool->stat_lock);
+ pool->free_list_size = 0;
+
+ if (pool->flags & DMABUF_POOL_FLAG_DEFER_FREE)
+ dmabuf_pool_init_deferred_free(pool);
+
+ if ((pool->flags & DMABUF_POOL_FLAG_DEFER_FREE) || pool->ops->shrink) {
+ ret = dmabuf_pool_init_shrinker(pool);
+ if (ret)
+ pr_err("%s: Failed to register shrinker\n", __func__);
+ }
+
+ pool->num_of_buffers = 0;
+ pool->num_of_alloc_bytes = 0;
+ pool->alloc_bytes_wm = 0;
+
+ pool_root = debugfs_create_dir(pool->name, dev->debug_root);
+ debugfs_create_u64("num_of_buffers",
+ 0444, pool_root,
+ &pool->num_of_buffers);
+ debugfs_create_u64("num_of_alloc_bytes",
+ 0444,
+ pool_root,
+ &pool->num_of_alloc_bytes);
+ debugfs_create_u64("alloc_bytes_wm",
+ 0444,
+ pool_root,
+ &pool->alloc_bytes_wm);
+
+ if (pool->shrinker.count_objects &&
+ pool->shrinker.scan_objects) {
+ snprintf(debug_name, 64, "%s_shrink", pool->name);
+ debugfs_create_file(debug_name,
+ 0644,
+ pool_root,
+ pool,
+ &debug_shrink_fops);
+ }
+
+ down_write(&dev->lock);
+ pool->id = pool_id++;
+ /*
+ * use negative pool->id to reverse the priority -- when traversing
+ * the list later attempt higher id numbers first
+ */
+ plist_node_init(&pool->node, -pool->id);
+ plist_add(&pool->node, &dev->pools);
+
+ up_write(&dev->lock);
+}
+EXPORT_SYMBOL(dmabuf_pool_add);
+
+static int dmabuf_pool_device_create(void)
+{
+ struct dmabuf_pool_device *idev;
+ int ret;
+
+ idev = kzalloc(sizeof(*idev), GFP_KERNEL);
+ if (!idev)
+ return -ENOMEM;
+
+ ret = alloc_chrdev_region(&idev->device_devt, 0, NUM_POOL_MINORS,
+ DEVNAME);
+ if (ret)
+ goto free_idev;
+
+ idev->pool_class = class_create(THIS_MODULE, DEVNAME);
+ if (IS_ERR(idev->pool_class)) {
+ ret = PTR_ERR(idev->pool_class);
+ goto unreg_region;
+ }
+
+ idev->debug_root = debugfs_create_dir(DEVNAME, NULL);
+ init_rwsem(&idev->lock);
+ plist_head_init(&idev->pools);
+ internal_dev = idev;
+ return 0;
+
+unreg_region:
+ unregister_chrdev_region(idev->device_devt, NUM_POOL_MINORS);
+free_idev:
+ kfree(idev);
+ return ret;
+
+}
+subsys_initcall(dmabuf_pool_device_create);
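
For context, and not part of the patch itself: once userspace has a dma-buf fd from one of these pools, a kernel driver imports it through the standard dma-buf importer calls, which land in the exporter ops defined above. A minimal sketch, assuming <linux/dma-buf.h>; my_import_pool_buffer() and my_dev are made-up names:

    static int my_import_pool_buffer(struct device *my_dev, int fd)
    {
    	struct dma_buf *dmabuf;
    	struct dma_buf_attachment *attach;
    	struct sg_table *sgt;
    	int ret = 0;

    	dmabuf = dma_buf_get(fd);
    	if (IS_ERR(dmabuf))
    		return PTR_ERR(dmabuf);

    	attach = dma_buf_attach(dmabuf, my_dev);	/* dmabuf_pool_attach() */
    	if (IS_ERR(attach)) {
    		ret = PTR_ERR(attach);
    		goto put_buf;
    	}

    	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
    	if (IS_ERR(sgt)) {			/* dmabuf_pool_map_dma_buf() */
    		ret = PTR_ERR(sgt);
    		goto detach;
    	}

    	/* ... hand the sg_table to the device ... */

    	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
    detach:
    	dma_buf_detach(dmabuf, attach);
    put_buf:
    	dma_buf_put(dmabuf);
    	return ret;
    }
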
new file mode 100644
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * drivers/dma-buf/pools/dmabuf-pools.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ */
+
+#ifndef _DMABUF_POOLS_H
+#define _DMABUF_POOLS_H
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/shrinker.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <uapi/linux/dmabuf-pools.h>
+
+/**
+ * struct dmabuf_pool_buffer - metadata for a particular buffer
+ * @pool: back pointer to the pool the buffer came from
+ * @flags: buffer specific flags
+ * @private_flags: internal buffer specific flags
+ * @size: size of the buffer
+ * @priv_virt: private data to the buffer representable as
+ * a void *
+ * @lock: protects the buffer's cnt fields
+ * @kmap_cnt: number of times the buffer is mapped to the kernel
+ * @vaddr: the kernel mapping if kmap_cnt is not zero
+ * @sg_table: the sg table for the buffer, filled in by the pool's
+ * allocate op
+ * @attachments: list head for device attachments
+ * @list: list head for deferred freeing
+ */
+struct dmabuf_pool_buffer {
+ struct dmabuf_pool *pool;
+ unsigned long flags;
+ unsigned long private_flags;
+ size_t size;
+ void *priv_virt;
+ struct mutex lock;
+ int kmap_cnt;
+ void *vaddr;
+ struct sg_table *sg_table;
+ struct list_head attachments;
+ struct list_head list;
+};
+
+
+/**
+ * struct dmabuf_pool_ops - ops to operate on a given pool
+ * @allocate: allocate memory
+ * @free: free memory
+ * @map_kernel: map memory into the kernel
+ * @unmap_kernel: unmap memory from the kernel
+ * @map_user: map memory to userspace
+ * @shrink: shrinker hook to reduce pool memory usage
+ *
+ * allocate and map_user return 0 on success, -errno on error.
+ * map_kernel returns a pointer on success, ERR_PTR on error.
+ * @free will be called with DMABUF_POOL_PRIV_FLAG_SHRINKER_FREE set in
+ * the buffer's private_flags when called from a shrinker. In that
+ * case, the pages being freed must be truly freed back to the
+ * system, not put in a page pool or otherwise cached.
+ */
+struct dmabuf_pool_ops {
+ int (*allocate)(struct dmabuf_pool *pool,
+ struct dmabuf_pool_buffer *buffer, unsigned long len,
+ unsigned long flags);
+ void (*free)(struct dmabuf_pool_buffer *buffer);
+ void * (*map_kernel)(struct dmabuf_pool *pool,
+ struct dmabuf_pool_buffer *buffer);
+ void (*unmap_kernel)(struct dmabuf_pool *pool,
+ struct dmabuf_pool_buffer *buffer);
+ int (*map_user)(struct dmabuf_pool *mapper,
+ struct dmabuf_pool_buffer *buffer,
+ struct vm_area_struct *vma);
+ int (*shrink)(struct dmabuf_pool *pool, gfp_t gfp_mask,
+ int nr_to_scan);
+};
+
+/**
+ * pool flags - flags between the dmabuf pools and core dmabuf code
+ */
+#define DMABUF_POOL_FLAG_DEFER_FREE BIT(0)
+
+/**
+ * private flags - flags internal to dmabuf_pools
+ */
+/*
+ * Buffer is being freed from a shrinker function. Skip any possible
+ * pool-specific caching mechanism (e.g. page pools). Guarantees that
+ * any buffer storage that came from the system allocator will be
+ * returned to the system allocator.
+ */
+#define DMABUF_POOL_PRIV_FLAG_SHRINKER_FREE BIT(0)
+
+/**
+ * struct dmabuf_pool - represents a dmabuf pool in the system
+ * @node: plist node to put the pool on the device's list of pools
+ * @pool_devt: dev_t of the pool's device node
+ * @pool_dev: char device for the pool's device node
+ * @minor: minor number of the pool's device node
+ * @ops: ops struct as above
+ * @flags: flags
+ * @id: id of pool
+ * @name: used for debugging/device-node name
+ * @shrinker: a shrinker for the pool
+ * @free_list: free list head if deferred free is used
+ * @free_list_size: size of the deferred free list in bytes
+ * @free_lock: protects the free list
+ * @waitqueue: queue to wait on from deferred free thread
+ * @task: task struct of deferred free thread
+ * @num_of_buffers: the number of currently allocated buffers
+ * @num_of_alloc_bytes: the number of allocated bytes
+ * @alloc_bytes_wm: the watermark (maximum) of allocated bytes
+ * @stat_lock: lock for pool statistics
+ *
+ * Represents a pool of memory from which buffers can be made. In some
+ * systems the only pool is regular system memory allocated via vmalloc.
+ * On others, some blocks might require large physically contiguous buffers
+ * that are allocated from a specially reserved pool.
+ */
+struct dmabuf_pool {
+ struct plist_node node;
+ dev_t pool_devt;
+ struct cdev pool_dev;
+ unsigned int minor;
+ struct dmabuf_pool_ops *ops;
+ unsigned long flags;
+ unsigned int id;
+ const char *name;
+ struct shrinker shrinker;
+ struct list_head free_list;
+ size_t free_list_size;
+ spinlock_t free_lock;
+ wait_queue_head_t waitqueue;
+ struct task_struct *task;
+ u64 num_of_buffers;
+ u64 num_of_alloc_bytes;
+ u64 alloc_bytes_wm;
+ spinlock_t stat_lock;
+};
+
+/**
+ * dmabuf_pool_add - adds a pool to dmabuf pools
+ * @pool: the pool to add
+ */
+void dmabuf_pool_add(struct dmabuf_pool *pool);
+
+/**
+ * some helpers for common operations on buffers using the sg_table
+ * and vaddr fields
+ */
+void *dmabuf_pool_map_kernel(struct dmabuf_pool *pool,
+ struct dmabuf_pool_buffer *buffer);
+void dmabuf_pool_unmap_kernel(struct dmabuf_pool *pool,
+ struct dmabuf_pool_buffer *buffer);
+int dmabuf_pool_map_user(struct dmabuf_pool *pool,
+ struct dmabuf_pool_buffer *buffer,
+ struct vm_area_struct *vma);
+int dmabuf_pool_buffer_zero(struct dmabuf_pool_buffer *buffer);
+int dmabuf_pool_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
+int dmabuf_pool_alloc(struct dmabuf_pool *pool, size_t len,
+ unsigned int flags);
+void dmabuf_pool_buffer_destroy(struct dmabuf_pool_buffer *buffer);
+
+/**
+ * dmabuf_pool_init_shrinker
+ * @pool: the pool
+ *
+ * If a pool sets the DMABUF_POOL_FLAG_DEFER_FREE flag or defines the shrink op
+ * this function will be called to setup a shrinker to shrink the freelists
+ * and call the pool's shrink op.
+ */
+int dmabuf_pool_init_shrinker(struct dmabuf_pool *pool);
+
+/**
+ * dmabuf_pool_init_deferred_free -- initialize deferred free functionality
+ * @pool: the pool
+ *
+ * If a pool sets the DMABUF_POOL_FLAG_DEFER_FREE flag this function will
+ * be called to setup deferred frees. Calls to free the buffer will
+ * return immediately and the actual free will occur some time later
+ */
+int dmabuf_pool_init_deferred_free(struct dmabuf_pool *pool);
+
+/**
+ * dmabuf_pool_freelist_add - add a buffer to the deferred free list
+ * @pool: the pool
+ * @buffer: the buffer
+ *
+ * Adds an item to the deferred freelist.
+ */
+void dmabuf_pool_freelist_add(struct dmabuf_pool *pool,
+ struct dmabuf_pool_buffer *buffer);
+
+/**
+ * dmabuf_pool_freelist_drain - drain the deferred free list
+ * @pool: the pool
+ * @size: amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed. The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ */
+size_t dmabuf_pool_freelist_drain(struct dmabuf_pool *pool, size_t size);
+
+/**
+ * dmabuf_pool_freelist_shrink - drain the deferred free
+ * list, skipping any pool-specific
+ * pooling or caching mechanisms
+ *
+ * @pool: the pool
+ * @size: amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed. The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ *
+ * Unlike with @dmabuf_pool_freelist_drain, don't put any pages back into
+ * page pools or otherwise cache the pages. Everything must be
+ * genuinely freed back to the system. If you're freeing from a
+ * shrinker you probably want to use this. Note that this relies on
+ * the pool.ops.free callback honoring the DMABUF_POOL_PRIV_FLAG_SHRINKER_FREE
+ * flag.
+ */
+size_t dmabuf_pool_freelist_shrink(struct dmabuf_pool *pool,
+ size_t size);
+
+/**
+ * dmabuf_pool_freelist_size - returns the size of the freelist in bytes
+ * @pool: the pool
+ */
+size_t dmabuf_pool_freelist_size(struct dmabuf_pool *pool);
+
+
+long dmabuf_pool_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+#endif /* _DMABUF_POOLS_H */
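
As an illustration of the exporter-side API above, and not part of the patch: a trivial pool provider might wire up the ops and register itself roughly as below. The dummy_pool_* names are made up, each buffer is backed by a single higher-order page block, and the sketch assumes <linux/gfp.h>, <linux/init.h>, <linux/scatterlist.h>, <linux/slab.h> and "dmabuf-pools.h".

    static int dummy_pool_allocate(struct dmabuf_pool *pool,
    			       struct dmabuf_pool_buffer *buffer,
    			       unsigned long len, unsigned long flags)
    {
    	struct sg_table *table;
    	struct page *page;

    	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(len));
    	if (!page)
    		return -ENOMEM;

    	table = kmalloc(sizeof(*table), GFP_KERNEL);
    	if (!table)
    		goto free_pages;
    	if (sg_alloc_table(table, 1, GFP_KERNEL))
    		goto free_table;

    	sg_set_page(table->sgl, page, len, 0);
    	/* dmabuf_pool_buffer_create() requires sg_table to be set here */
    	buffer->sg_table = table;
    	return 0;

    free_table:
    	kfree(table);
    free_pages:
    	__free_pages(page, get_order(len));
    	return -ENOMEM;
    }

    static void dummy_pool_free(struct dmabuf_pool_buffer *buffer)
    {
    	struct sg_table *table = buffer->sg_table;

    	__free_pages(sg_page(table->sgl), get_order(buffer->size));
    	sg_free_table(table);
    	kfree(table);
    }

    static struct dmabuf_pool_ops dummy_pool_ops = {
    	.allocate = dummy_pool_allocate,
    	.free = dummy_pool_free,
    	/* reuse the generic helpers from pool-helpers.c */
    	.map_kernel = dmabuf_pool_map_kernel,
    	.unmap_kernel = dmabuf_pool_unmap_kernel,
    	.map_user = dmabuf_pool_map_user,
    };

    static struct dmabuf_pool dummy_pool = {
    	.ops = &dummy_pool_ops,
    	.name = "dummy_pool",
    	/* set DMABUF_POOL_FLAG_DEFER_FREE here to get the freelist kthread */
    };

    static int __init dummy_pool_init(void)
    {
    	/* creates the /dev node (class dmabuf_pools) plus debugfs entries */
    	dmabuf_pool_add(&dummy_pool);
    	return 0;
    }
    device_initcall(dummy_pool_init);
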
new file mode 100644
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/dma-buf/pools/pool-helpers.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/sched.h>
+#include <uapi/linux/sched/types.h>
+#include <linux/scatterlist.h>
+#include <linux/vmalloc.h>
+#include "dmabuf-pools.h"
+
+void *dmabuf_pool_map_kernel(struct dmabuf_pool *pool,
+ struct dmabuf_pool_buffer *buffer)
+{
+ struct scatterlist *sg;
+ int i, j;
+ void *vaddr;
+ pgprot_t pgprot;
+ struct sg_table *table = buffer->sg_table;
+ int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ struct page **pages = vmalloc(array_size(npages,
+ sizeof(struct page *)));
+ struct page **tmp = pages;
+
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+
+ if (buffer->flags & DMABUF_POOL_FLAG_CACHED)
+ pgprot = PAGE_KERNEL;
+ else
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
+ struct page *page = sg_page(sg);
+
+ WARN_ON(i >= npages);
+ for (j = 0; j < npages_this_entry; j++)
+ *(tmp++) = page++;
+ }
+ vaddr = vmap(pages, npages, VM_MAP, pgprot);
+ vfree(pages);
+
+ if (!vaddr)
+ return ERR_PTR(-ENOMEM);
+
+ return vaddr;
+}
+
+void dmabuf_pool_unmap_kernel(struct dmabuf_pool *pool,
+ struct dmabuf_pool_buffer *buffer)
+{
+ vunmap(buffer->vaddr);
+}
+
+int dmabuf_pool_map_user(struct dmabuf_pool *pool,
+ struct dmabuf_pool_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ struct sg_table *table = buffer->sg_table;
+ unsigned long addr = vma->vm_start;
+ unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+ struct scatterlist *sg;
+ int i;
+ int ret;
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+ unsigned long remainder = vma->vm_end - addr;
+ unsigned long len = sg->length;
+
+ if (offset >= sg->length) {
+ offset -= sg->length;
+ continue;
+ } else if (offset) {
+ page += offset / PAGE_SIZE;
+ len = sg->length - offset;
+ offset = 0;
+ }
+ len = min(len, remainder);
+ ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
+ vma->vm_page_prot);
+ if (ret)
+ return ret;
+ addr += len;
+ if (addr >= vma->vm_end)
+ return 0;
+ }
+ return 0;
+}
+
+static int dmabuf_pool_clear_pages(struct page **pages, int num,
+ pgprot_t pgprot)
+{
+ void *addr = vm_map_ram(pages, num, -1, pgprot);
+
+ if (!addr)
+ return -ENOMEM;
+ memset(addr, 0, PAGE_SIZE * num);
+ vm_unmap_ram(addr, num);
+
+ return 0;
+}
+
+static int dmabuf_pool_sglist_zero(struct scatterlist *sgl, unsigned int nents,
+ pgprot_t pgprot)
+{
+ int p = 0;
+ int ret = 0;
+ struct sg_page_iter piter;
+ struct page *pages[32];
+
+ for_each_sg_page(sgl, &piter, nents, 0) {
+ pages[p++] = sg_page_iter_page(&piter);
+ if (p == ARRAY_SIZE(pages)) {
+ ret = dmabuf_pool_clear_pages(pages, p, pgprot);
+ if (ret)
+ return ret;
+ p = 0;
+ }
+ }
+ if (p)
+ ret = dmabuf_pool_clear_pages(pages, p, pgprot);
+
+ return ret;
+}
+
+int dmabuf_pool_buffer_zero(struct dmabuf_pool_buffer *buffer)
+{
+ struct sg_table *table = buffer->sg_table;
+ pgprot_t pgprot;
+
+ if (buffer->flags & DMABUF_POOL_FLAG_CACHED)
+ pgprot = PAGE_KERNEL;
+ else
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+ return dmabuf_pool_sglist_zero(table->sgl, table->nents, pgprot);
+}
+
+int dmabuf_pool_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
+{
+ struct scatterlist sg;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, page, size, 0);
+ return dmabuf_pool_sglist_zero(&sg, 1, pgprot);
+}
+
+void dmabuf_pool_freelist_add(struct dmabuf_pool *pool,
+ struct dmabuf_pool_buffer *buffer)
+{
+ spin_lock(&pool->free_lock);
+ list_add(&buffer->list, &pool->free_list);
+ pool->free_list_size += buffer->size;
+ spin_unlock(&pool->free_lock);
+ wake_up(&pool->waitqueue);
+}
+
+size_t dmabuf_pool_freelist_size(struct dmabuf_pool *pool)
+{
+ size_t size;
+
+ spin_lock(&pool->free_lock);
+ size = pool->free_list_size;
+ spin_unlock(&pool->free_lock);
+
+ return size;
+}
+
+static size_t _dmabuf_pool_freelist_drain(struct dmabuf_pool *pool, size_t size,
+ bool skip_pools)
+{
+ struct dmabuf_pool_buffer *buffer;
+ size_t total_drained = 0;
+
+ if (dmabuf_pool_freelist_size(pool) == 0)
+ return 0;
+
+ spin_lock(&pool->free_lock);
+ if (size == 0)
+ size = pool->free_list_size;
+
+ while (!list_empty(&pool->free_list)) {
+ if (total_drained >= size)
+ break;
+ buffer = list_first_entry(&pool->free_list,
+ struct dmabuf_pool_buffer,
+ list);
+ list_del(&buffer->list);
+ pool->free_list_size -= buffer->size;
+ if (skip_pools)
+ buffer->private_flags |=
+ DMABUF_POOL_PRIV_FLAG_SHRINKER_FREE;
+ total_drained += buffer->size;
+ spin_unlock(&pool->free_lock);
+ dmabuf_pool_buffer_destroy(buffer);
+ spin_lock(&pool->free_lock);
+ }
+ spin_unlock(&pool->free_lock);
+
+ return total_drained;
+}
+
+size_t dmabuf_pool_freelist_drain(struct dmabuf_pool *pool, size_t size)
+{
+ return _dmabuf_pool_freelist_drain(pool, size, false);
+}
+
+size_t dmabuf_pool_freelist_shrink(struct dmabuf_pool *pool, size_t size)
+{
+ return _dmabuf_pool_freelist_drain(pool, size, true);
+}
+
+static int dmabuf_pool_deferred_free(void *data)
+{
+ struct dmabuf_pool *pool = data;
+
+ while (true) {
+ struct dmabuf_pool_buffer *buffer;
+
+ wait_event_freezable(pool->waitqueue,
+ dmabuf_pool_freelist_size(pool) > 0);
+
+ spin_lock(&pool->free_lock);
+ if (list_empty(&pool->free_list)) {
+ spin_unlock(&pool->free_lock);
+ continue;
+ }
+ buffer = list_first_entry(&pool->free_list,
+ struct dmabuf_pool_buffer,
+ list);
+ list_del(&buffer->list);
+ pool->free_list_size -= buffer->size;
+ spin_unlock(&pool->free_lock);
+ dmabuf_pool_buffer_destroy(buffer);
+ }
+
+ return 0;
+}
+
+int dmabuf_pool_init_deferred_free(struct dmabuf_pool *pool)
+{
+ struct sched_param param = { .sched_priority = 0 };
+
+ INIT_LIST_HEAD(&pool->free_list);
+ init_waitqueue_head(&pool->waitqueue);
+ pool->task = kthread_run(dmabuf_pool_deferred_free, pool,
+ "%s", pool->name);
+ if (IS_ERR(pool->task)) {
+ pr_err("%s: creating thread for deferred free failed\n",
+ __func__);
+ return PTR_ERR_OR_ZERO(pool->task);
+ }
+ sched_setscheduler(pool->task, SCHED_IDLE, &param);
+ return 0;
+}
+
+static unsigned long dmabuf_pool_shrink_count(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ struct dmabuf_pool *pool = container_of(shrinker, struct dmabuf_pool,
+ shrinker);
+ int total = 0;
+
+ total = dmabuf_pool_freelist_size(pool) / PAGE_SIZE;
+ if (pool->ops->shrink)
+ total += pool->ops->shrink(pool, sc->gfp_mask, 0);
+ return total;
+}
+
+static unsigned long dmabuf_pool_shrink_scan(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ struct dmabuf_pool *pool = container_of(shrinker, struct dmabuf_pool,
+ shrinker);
+ int freed = 0;
+ int to_scan = sc->nr_to_scan;
+
+ if (to_scan == 0)
+ return 0;
+
+ /*
+ * shrink the free list first, no point in zeroing the memory if we're
+ * just going to reclaim it. Also, skip any possible page pooling.
+ */
+ if (pool->flags & DMABUF_POOL_FLAG_DEFER_FREE) {
+ freed = dmabuf_pool_freelist_shrink(pool, to_scan * PAGE_SIZE);
+ freed /= PAGE_SIZE;
+ }
+
+ to_scan -= freed;
+ if (to_scan <= 0)
+ return freed;
+
+ if (pool->ops->shrink)
+ freed += pool->ops->shrink(pool, sc->gfp_mask, to_scan);
+ return freed;
+}
+
+int dmabuf_pool_init_shrinker(struct dmabuf_pool *pool)
+{
+ pool->shrinker.count_objects = dmabuf_pool_shrink_count;
+ pool->shrinker.scan_objects = dmabuf_pool_shrink_scan;
+ pool->shrinker.seeks = DEFAULT_SEEKS;
+ pool->shrinker.batch = 0;
+
+ return register_shrinker(&pool->shrinker);
+}
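
Not part of the patch, but to illustrate the DMABUF_POOL_PRIV_FLAG_SHRINKER_FREE contract that dmabuf_pool_freelist_shrink() relies on: a pool that keeps its own page cache is expected to bypass that cache when the flag is set. A sketch, where cached_pool_free() and cached_pool_recycle_page() are made-up names and each scatterlist entry is assumed to be one higher-order page block:

    static void cached_pool_free(struct dmabuf_pool_buffer *buffer)
    {
    	bool shrinker_free = buffer->private_flags &
    			     DMABUF_POOL_PRIV_FLAG_SHRINKER_FREE;
    	struct sg_table *table = buffer->sg_table;
    	struct scatterlist *sg;
    	int i;

    	for_each_sg(table->sgl, sg, table->nents, i) {
    		struct page *page = sg_page(sg);

    		if (shrinker_free)
    			/* give the memory back to the system allocator */
    			__free_pages(page, get_order(sg->length));
    		else
    			/* hypothetical per-pool cache for later reuse */
    			cached_pool_recycle_page(page, sg->length);
    	}
    	sg_free_table(table);
    	kfree(table);
    }
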
new file mode 100644
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2011 Google, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include "dmabuf-pools.h"
+
+union pool_ioctl_arg {
+ struct dmabuf_pool_allocation_data pool_allocation;
+};
+
+static int validate_ioctl_arg(unsigned int cmd, union pool_ioctl_arg *arg)
+{
+ switch (cmd) {
+ case DMABUF_POOL_IOC_ALLOC:
+ if (arg->pool_allocation.reserved0 ||
+ arg->pool_allocation.reserved1)
+ return -EINVAL;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* fix up the cases where the ioctl direction bits are incorrect */
+static unsigned int pool_ioctl_dir(unsigned int cmd)
+{
+ switch (cmd) {
+ default:
+ return _IOC_DIR(cmd);
+ }
+}
+
+long dmabuf_pool_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int ret = 0;
+ unsigned int dir;
+ union pool_ioctl_arg data;
+
+ dir = pool_ioctl_dir(cmd);
+
+ if (_IOC_SIZE(cmd) > sizeof(data))
+ return -EINVAL;
+
+ /*
+ * The copy_from_user is unconditional here for both read and write
+ * to do the validate. If there is no write for the ioctl, the
+ * buffer is cleared
+ */
+ if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ ret = validate_ioctl_arg(cmd, &data);
+ if (ret) {
+ pr_warn_once("%s: ioctl validate failed\n", __func__);
+ return ret;
+ }
+
+ if (!(dir & _IOC_WRITE))
+ memset(&data, 0, sizeof(data));
+
+ switch (cmd) {
+ case DMABUF_POOL_IOC_ALLOC:
+ {
+ struct dmabuf_pool *pool = filp->private_data;
+ int fd;
+
+ fd = dmabuf_pool_alloc(pool, data.pool_allocation.len,
+ data.pool_allocation.flags);
+ if (fd < 0)
+ return fd;
+
+ data.pool_allocation.fd = fd;
+
+ break;
+ }
+ default:
+ return -ENOTTY;
+ }
+
+ if (dir & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
+ return -EFAULT;
+ }
+ return ret;
+}
new file mode 100644
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/uapi/linux/dmabuf-pools.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+#ifndef _UAPI_LINUX_DMABUF_POOL_H
+#define _UAPI_LINUX_DMABUF_POOL_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * allocation flags - the lower 16 bits are used by the core dmabuf-pools
+ * code, the upper 16 bits are reserved for use by the individual pool
+ * implementations.
+ */
+
+/*
+ * mappings of this buffer should be cached, dmabuf pools will do cache
+ * maintenance when the buffer is mapped for dma
+ */
+#define DMABUF_POOL_FLAG_CACHED 1
+
+/**
+ * DOC: DMABUF Pool Userspace API
+ *
+ */
+
+/**
+ * struct dmabuf_pool_allocation_data - metadata passed from userspace for
+ * allocations
+ * @len: size of the allocation
+ * @flags: flags passed to pool
+ * @fd: will be populated with a fd which provides the
+ * handle to the allocated dma-buf
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct dmabuf_pool_allocation_data {
+ __u64 len;
+ __u32 flags;
+ __u32 fd;
+ __u32 reserved0;
+ __u32 reserved1;
+};
+
+#define DMABUF_POOL_IOC_MAGIC 'P'
+
+/**
+ * DOC: DMABUF_POOL_IOC_ALLOC - allocate memory from pool
+ *
+ * Takes a dmabuf_pool_allocation_data struct and returns it with the fd field
+ * populated with the dmabuf handle of the allocation.
+ */
+#define DMABUF_POOL_IOC_ALLOC _IOWR(DMABUF_POOL_IOC_MAGIC, 0, \
+ struct dmabuf_pool_allocation_data)
+
+#endif /* _UAPI_LINUX_DMABUF_POOL_H */
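
For reference, and not part of the patch: userspace drives this interface by opening a pool's device node and issuing DMABUF_POOL_IOC_ALLOC; the fd it gets back is an ordinary dma-buf that can be mmap()ed or passed to drivers. A sketch, assuming the header above is installed and a hypothetical /dev/dmabuf_pools/system node:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <linux/dmabuf-pools.h>

    int main(void)
    {
    	struct dmabuf_pool_allocation_data alloc = {
    		.len = 4096,
    		.flags = DMABUF_POOL_FLAG_CACHED,
    	};
    	int pool_fd;
    	void *buf;

    	/* node name depends on how the pool registered itself */
    	pool_fd = open("/dev/dmabuf_pools/system", O_RDWR);
    	if (pool_fd < 0)
    		return 1;

    	if (ioctl(pool_fd, DMABUF_POOL_IOC_ALLOC, &alloc) < 0)
    		return 1;

    	/* alloc.fd is now an ordinary dma-buf fd */
    	buf = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE, MAP_SHARED,
    		   alloc.fd, 0);
    	if (buf == MAP_FAILED)
    		return 1;

    	memset(buf, 0, alloc.len);
    	munmap(buf, alloc.len);
    	close(alloc.fd);
    	close(pool_fd);
    	return 0;
    }
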
This patch introduces the dma-buf pools framework. This framework
allows for different pool implementations to be created, which act
as dma-buf exporters, allowing userland to allocate specific types
of memory for use in dma-buf sharing.

This resembles the Android ION framework in that it takes that code
and renames most of the variables. (Following Rafael Wysocki's earlier
theory that the "easiest way to sell a once rejected feature is to
advertise it under a different name" :) However, the API has been
greatly simplified compared to ION.

This patchset extends some of my (and Benjamin's) earlier work with
the ION per-heap device nodes. Each pool (previously "heap" in ION)
has its own device node, which one can allocate from using the
DMABUF_POOL_IOC_ALLOC ioctl, which is very similar to the
ION_IOC_ALLOC call. There is no equivalent ION_IOC_HEAP_QUERY
interface, as the pools all have their own device nodes.
Additionally, any unused code from ION was removed.

NOTE: Reworked the per-pool devices to create a proper class so
Android can have a nice /dev/dmabuf_pools/ directory. It's working,
but I'm almost sure I did it wrong, as it's much more complex than
just using a miscdevice. Extra review would be helpful.

Cc: Laura Abbott <labbott@redhat.com>
Cc: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Liam Mark <lmark@codeaurora.org>
Cc: Brian Starkey <Brian.Starkey@arm.com>
Cc: Andrew F. Davis <afd@ti.com>
Cc: Chenbo Feng <fengc@google.com>
Cc: Alistair Strachan <astrachan@google.com>
Cc: dri-devel@lists.freedesktop.org
Signed-off-by: John Stultz <john.stultz@linaro.org>
---
 MAINTAINERS                          |  13 +
 drivers/dma-buf/Kconfig              |   2 +
 drivers/dma-buf/Makefile             |   1 +
 drivers/dma-buf/pools/Kconfig        |  10 +
 drivers/dma-buf/pools/Makefile       |   2 +
 drivers/dma-buf/pools/dmabuf-pools.c | 670 +++++++++++++++++++++++++++++++++++
 drivers/dma-buf/pools/dmabuf-pools.h | 244 +++++++++++++
 drivers/dma-buf/pools/pool-helpers.c | 317 +++++++++++++++++
 drivers/dma-buf/pools/pool-ioctl.c   |  94 +++++
 include/uapi/linux/dmabuf-pools.h    |  59 +++
 10 files changed, 1412 insertions(+)
 create mode 100644 drivers/dma-buf/pools/Kconfig
 create mode 100644 drivers/dma-buf/pools/Makefile
 create mode 100644 drivers/dma-buf/pools/dmabuf-pools.c
 create mode 100644 drivers/dma-buf/pools/dmabuf-pools.h
 create mode 100644 drivers/dma-buf/pools/pool-helpers.c
 create mode 100644 drivers/dma-buf/pools/pool-ioctl.c
 create mode 100644 include/uapi/linux/dmabuf-pools.h
--
2.7.4