@@ -47,4 +47,6 @@ menuconfig DMABUF_HEAPS
this allows userspace to allocate dma-bufs that can be shared between
drivers.
+source "drivers/dma-buf/heaps/Kconfig"
+
endmenu
new file mode 100644
@@ -0,0 +1,6 @@
+config DMABUF_HEAPS_SYSTEM
+ bool "DMA-BUF System Heap"
+ depends on DMABUF_HEAPS
+ help
+ Choose this option to enable the system dmabuf heap. The system heap
+ is backed by pages from the buddy allocator. If in doubt, say Y.
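
For illustration, a minimal userspace sketch of allocating a buffer from this heap once the option is enabled. It is a sketch only: it assumes the dma-heap character-device uapi as it later landed in mainline (<linux/dma-heap.h> with struct dma_heap_allocation_data and DMA_HEAP_IOCTL_ALLOC) and the "system_heap" name registered by the driver below; the exact ioctl name and struct layout in this revision of the series may differ.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

int main(void)
{
	struct dma_heap_allocation_data data = {
		.len = 1 << 20,			/* 1 MiB; page-aligned sizes expected */
		.fd_flags = O_RDWR | O_CLOEXEC,
	};
	int heap_fd, ret;

	heap_fd = open("/dev/dma_heap/system_heap", O_RDWR | O_CLOEXEC);
	if (heap_fd < 0) {
		perror("open");
		return 1;
	}

	ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
	if (ret < 0) {
		perror("DMA_HEAP_IOCTL_ALLOC");
		close(heap_fd);
		return 1;
	}

	printf("dma-buf fd: %u\n", data.fd);	/* share with a driver or mmap() it */
	close(data.fd);
	close(heap_fd);
	return 0;
}

With DMABUF_HEAPS and DMABUF_HEAPS_SYSTEM enabled, the heap should appear as /dev/dma_heap/system_heap, and data.fd then holds a dma-buf fd that can be mmap()ed or passed to an importing driver.
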
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += heap-helpers.o
+obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o
new file mode 100644
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMABUF System heap exporter
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#include <asm/page.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-heap.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include "heap-helpers.h"
+
+static struct system_heap {
+ struct dma_heap *heap;
+} sys_heap;
+
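+/* heap-helpers free callback: release the backing pages and the buffer metadata */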
+static void system_heap_free(struct heap_helper_buffer *buffer)
+{
+ pgoff_t pg;
+
+ for (pg = 0; pg < buffer->pagecount; pg++)
+ __free_page(buffer->pages[pg]);
+ kfree(buffer->pages);
+ kfree(buffer);
+}
+
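+/*
+ * Allocate a buffer of @len bytes backed by zeroed pages from the buddy
+ * allocator, export it as a dma-buf through the heap helpers and return
+ * the new dma-buf fd (or a negative errno) to the caller.
+ */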
+static int system_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long flags)
+{
+ struct heap_helper_buffer *helper_buffer;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct dma_buf *dmabuf;
+ int ret = -ENOMEM;
+ pgoff_t pg;
+
+ helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
+ if (!helper_buffer)
+ return -ENOMEM;
+
+ INIT_HEAP_HELPER_BUFFER(helper_buffer, system_heap_free);
+ helper_buffer->heap_buffer.flags = flags;
+ helper_buffer->heap_buffer.heap = heap;
+ helper_buffer->heap_buffer.size = len;
+
+ helper_buffer->pagecount = len / PAGE_SIZE;
+ helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
+ sizeof(*helper_buffer->pages),
+ GFP_KERNEL);
+ if (!helper_buffer->pages) {
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ for (pg = 0; pg < helper_buffer->pagecount; pg++) {
+ helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!helper_buffer->pages[pg])
+ goto err1;
+ }
+
+ /* create the dmabuf */
+ exp_info.ops = &heap_helper_ops;
+ exp_info.size = len;
+ exp_info.flags = O_RDWR;
+ exp_info.priv = &helper_buffer->heap_buffer;
+ dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dmabuf)) {
+ ret = PTR_ERR(dmabuf);
+ goto err1;
+ }
+
+ helper_buffer->heap_buffer.dmabuf = dmabuf;
+
+ ret = dma_buf_fd(dmabuf, O_CLOEXEC);
+ if (ret < 0) {
+ dma_buf_put(dmabuf);
+		/* just return; the final put calls release, which frees the buffer */
+ return ret;
+ }
+
+ return ret;
+
+err1:
+ while (pg > 0)
+ __free_page(helper_buffer->pages[--pg]);
+ kfree(helper_buffer->pages);
+err0:
+ kfree(helper_buffer);
+
+	return ret;
+}
+
+static struct dma_heap_ops system_heap_ops = {
+ .allocate = system_heap_allocate,
+};
+
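+/* Register the system heap with the dma-heap framework at boot */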
+static int system_heap_create(void)
+{
+ struct dma_heap_export_info exp_info;
+ int ret = 0;
+
+ exp_info.name = "system_heap";
+ exp_info.ops = &system_heap_ops;
+ exp_info.priv = &sys_heap;
+
+ sys_heap.heap = dma_heap_add(&exp_info);
+ if (IS_ERR(sys_heap.heap))
+ ret = PTR_ERR(sys_heap.heap);
+
+ return ret;
+}
+device_initcall(system_heap_create);