@@ -21,12 +21,204 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/export.h>
+#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_gem_cma_helper.h>
+
+struct drm_gem_cma_dmabuf_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dir;
+	bool is_mapped;
+};
+
+static int drm_gem_cma_attach_dma_buf(struct dma_buf *dmabuf,
+				      struct device *dev,
+				      struct dma_buf_attachment *attach)
+{
+	struct drm_gem_cma_dmabuf_attachment *drm_gem_cma_attach;
+
+	drm_gem_cma_attach = kzalloc(sizeof(*drm_gem_cma_attach), GFP_KERNEL);
+	if (!drm_gem_cma_attach)
+		return -ENOMEM;
+
+	/* DMA_NONE marks the attachment as not yet mapped. */
+	drm_gem_cma_attach->dir = DMA_NONE;
+	attach->priv = drm_gem_cma_attach;
+
+	return 0;
+}
+
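From the importer's side, this attach callback runs inside dma_buf_attach(). A minimal sketch of that path, assuming a hypothetical importing device "importer_dev" and a dma-buf file descriptor "fd" handed over by the exporter:

	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;

	/* Takes a reference on the dma-buf file. */
	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* Ends up in drm_gem_cma_attach_dma_buf() above. */
	attach = dma_buf_attach(dmabuf, importer_dev);
	if (IS_ERR(attach)) {
		dma_buf_put(dmabuf);
		return PTR_ERR(attach);
	}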
+static void drm_gem_cma_detach_dma_buf(struct dma_buf *dmabuf,
+				       struct dma_buf_attachment *attach)
+{
+	struct drm_gem_cma_dmabuf_attachment *drm_gem_cma_attach = attach->priv;
+	struct sg_table *sgt;
+
+	if (!drm_gem_cma_attach)
+		return;
+
+	sgt = &drm_gem_cma_attach->sgt;
+
+	/*
+	 * Undo a mapping left in place by drm_gem_cma_map_dma_buf().
+	 * dma_unmap_sg() must be passed the original entry count, not
+	 * the count returned by dma_map_sg().
+	 */
+	if (drm_gem_cma_attach->dir != DMA_NONE)
+		dma_unmap_sg(attach->dev, sgt->sgl, sgt->orig_nents,
+			     drm_gem_cma_attach->dir);
+
+	/* Safe even if the table was never allocated: it was zeroed. */
+	sg_free_table(sgt);
+	kfree(drm_gem_cma_attach);
+	attach->priv = NULL;
+}
+
+static struct sg_table *
+drm_gem_cma_map_dma_buf(struct dma_buf_attachment *attach,
+			enum dma_data_direction dir)
+{
+	struct drm_gem_cma_dmabuf_attachment *drm_gem_cma_attach = attach->priv;
+	struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv;
+	struct drm_device *dev = cma_obj->base.dev;
+	struct sg_table *sgt;
+	int nents, ret;
+
+	sgt = &drm_gem_cma_attach->sgt;
+
+	/* Return the cached table if already mapped with the same direction. */
+	if (drm_gem_cma_attach->is_mapped && drm_gem_cma_attach->dir == dir)
+		return sgt;
+
+	/* Tear down a previous mapping taken with a different direction. */
+	if (drm_gem_cma_attach->is_mapped) {
+		if (drm_gem_cma_attach->dir != DMA_NONE)
+			dma_unmap_sg(attach->dev, sgt->sgl, sgt->orig_nents,
+				     drm_gem_cma_attach->dir);
+		sg_free_table(sgt);
+		drm_gem_cma_attach->is_mapped = false;
+	}
+
+	/* Build a single-entry sg table for the contiguous CMA buffer. */
+	ret = dma_get_sgtable(dev->dev, sgt, cma_obj->vaddr, cma_obj->paddr,
+			      cma_obj->base.size);
+	if (ret) {
+		DRM_ERROR("failed to get sg table.\n");
+		return ERR_PTR(ret);
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (dir != DMA_NONE) {
+		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+		if (!nents) {
+			DRM_ERROR("failed to map sgl with iommu.\n");
+			sg_free_table(sgt);
+			sgt = ERR_PTR(-EIO);
+			goto out_unlock;
+		}
+	}
+
+	drm_gem_cma_attach->is_mapped = true;
+	drm_gem_cma_attach->dir = dir;
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return sgt;
+}
+
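An importer reaches this map_dma_buf hook through dma_buf_map_attachment(). A hedged continuation of the earlier sketch, reusing the "attach" obtained from dma_buf_attach():

	struct sg_table *sgt;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* For a CMA buffer the table holds a single contiguous entry;
	 * program the importer's DMA engine from sg_dma_address(sgt->sgl). */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);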
+static void drm_gem_cma_unmap_dma_buf(struct dma_buf_attachment *attach,
+				      struct sg_table *sgt,
+				      enum dma_data_direction dir)
+{
+	/* Nothing to do: the cached mapping is released at detach time. */
+}
+
+static void drm_gem_cma_dmabuf_release(struct dma_buf *dmabuf)
+{
+	struct drm_gem_cma_object *cma_obj = dmabuf->priv;
+
+	/*
+	 * drm_gem_cma_dmabuf_release() is called when the dma-buf file's
+	 * f_count drops to zero. Drop the GEM object reference that
+	 * drm_prime_handle_to_fd() took when the buffer was exported.
+	 */
+	if (cma_obj->base.export_dma_buf == dmabuf) {
+		cma_obj->base.export_dma_buf = NULL;
+
+		/*
+		 * Drop the GEM object refcount so the backing buffer and
+		 * its resources can be released.
+		 */
+		drm_gem_object_unreference_unlocked(&cma_obj->base);
+	}
+}
+
+static void *drm_gem_cma_dmabuf_kmap(struct dma_buf *dma_buf,
+				     unsigned long page_num)
+{
+	struct drm_gem_cma_object *cma_obj = dma_buf->priv;
+
+	/*
+	 * CMA buffers keep a permanent kernel mapping, so kmap and
+	 * kmap_atomic can share this implementation and return the
+	 * virtual address directly. Returning paddr here would hand
+	 * out a bus address, not a kernel pointer.
+	 */
+	return cma_obj->vaddr + page_num * PAGE_SIZE;
+}
+
+static int drm_gem_cma_dmabuf_mmap(struct dma_buf *dmabuf,
+				   struct vm_area_struct *vma)
+{
+	struct drm_gem_cma_object *cma_obj = dmabuf->priv;
+	struct drm_device *dev = cma_obj->base.dev;
+	int ret;
+
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	ret = dma_mmap_coherent(dev->dev, vma, cma_obj->vaddr, cma_obj->paddr,
+				cma_obj->base.size);
+	if (ret) {
+		DRM_DEBUG_PRIME("Remapping memory failed, error: %d\n", ret);
+		return ret;
+	}
+
+	DRM_DEBUG_PRIME("%s: mapped dma addr 0x%08lx at 0x%08lx, size %zu\n",
+			__func__, (unsigned long)cma_obj->paddr, vma->vm_start,
+			cma_obj->base.size);
+
+	return 0;
+}
+
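With the mmap hook wired up, userspace can map the buffer through the dma-buf file descriptor itself. A sketch, assuming "fd" and "size" come from the PRIME export path below; whether the dma-buf fd is directly mmap()able depends on the kernel version:

	#include <sys/mman.h>

	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return -1;
	/* CPU access through "map"; the mapping is write-combined. */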
+static const struct dma_buf_ops drm_gem_cma_dmabuf_ops = {
+	.attach		= drm_gem_cma_attach_dma_buf,
+	.detach		= drm_gem_cma_detach_dma_buf,
+	.map_dma_buf	= drm_gem_cma_map_dma_buf,
+	.unmap_dma_buf	= drm_gem_cma_unmap_dma_buf,
+	.kmap		= drm_gem_cma_dmabuf_kmap,
+	.kmap_atomic	= drm_gem_cma_dmabuf_kmap,
+	.mmap		= drm_gem_cma_dmabuf_mmap,
+	.release	= drm_gem_cma_dmabuf_release,
+};
+
+struct dma_buf *drm_gem_cma_prime_export(struct drm_device *drm_dev,
+					 struct drm_gem_object *obj, int flags)
+{
+	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+	/* The exported dma-buf file must allow read/write access. */
+	flags |= O_RDWR;
+	return dma_buf_export(cma_obj, &drm_gem_cma_dmabuf_ops,
+			      cma_obj->base.size, flags);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_prime_export);
+
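A driver built on the CMA helpers would advertise these hooks through its struct drm_driver, together with the generic PRIME ioctl helpers from drm_prime.c. A hedged sketch for a hypothetical driver "foo":

	static struct drm_driver foo_driver = {
		.driver_features	= DRIVER_GEM | DRIVER_PRIME,
		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
		.gem_prime_export	= drm_gem_cma_prime_export,
		.gem_prime_import	= drm_gem_cma_prime_import,
		/* ... remaining GEM/CMA hooks elided ... */
	};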
+struct drm_gem_object *drm_gem_cma_prime_import(struct drm_device *drm_dev,
+						struct dma_buf *dmabuf)
+{
+	struct drm_gem_cma_object *cma_obj;
+
+	if (dmabuf->ops == &drm_gem_cma_dmabuf_ops) {
+		struct drm_gem_object *obj;
+
+		cma_obj = dmabuf->priv;
+		obj = &cma_obj->base;
+
+		/* is it from our device? */
+		if (obj->dev == drm_dev) {
+			/*
+			 * Importing a dmabuf exported from our own gem
+			 * increases the refcount on the gem itself instead
+			 * of the dmabuf's f_count.
+			 */
+			drm_gem_object_reference(obj);
+			return obj;
+		}
+	}
+
+	/*
+	 * Foreign buffers are not supported by this helper; the PRIME
+	 * core expects an ERR_PTR() here, not NULL.
+	 */
+	return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import);
+
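From userspace, the self-import fast path above is exercised by exporting a GEM handle and importing the resulting fd on the same device. A sketch with hypothetical "drm_fd" and "handle":

	struct drm_prime_handle args = {
		.handle	= handle,
		.flags	= DRM_CLOEXEC,
	};

	/* Export: GEM handle -> dma-buf fd (calls drm_gem_cma_prime_export()). */
	ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);

	/* Import on the same device only takes a GEM reference. */
	ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);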
static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
{
return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
@@ -45,4 +45,10 @@ extern const struct vm_operations_struct drm_gem_cma_vm_ops;
void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
#endif

+struct dma_buf *drm_gem_cma_prime_export(struct drm_device *drm_dev,
+					 struct drm_gem_object *obj, int flags);
+
+struct drm_gem_object *drm_gem_cma_prime_import(struct drm_device *drm_dev,
+						struct dma_buf *dmabuf);
+
#endif /* __DRM_GEM_CMA_HELPER_H__ */