
[v5,5/8] iio: core: Add new DMABUF interface infrastructure

Message ID 20231219175009.65482-6-paul@crapouillou.net
State New
Series [v5,1/8] iio: buffer-dma: Get rid of outgoing queue

Commit Message

Paul Cercueil Dec. 19, 2023, 5:50 p.m. UTC
Add the necessary infrastructure to the IIO core to support a new
optional DMABUF based interface.

With this new interface, DMABUF objects (externally created) can be
attached to an IIO buffer, and subsequently used for data transfer.

A userspace application can then use this interface to share DMABUF
objects between several interfaces, allowing it to transfer data in a
zero-copy fashion, for instance between IIO and the USB stack.

The userspace application can also memory-map the DMABUF objects, and
access the sample data directly. The advantage of doing this vs. the
read() interface is that it avoids an extra copy of the data between the
kernel and userspace. This is particularly useful for high-speed
devices which produce several megabytes or even gigabytes of data per
second.

As part of the interface, 3 new IOCTLs have been added:

IIO_BUFFER_DMABUF_ATTACH_IOCTL(int fd):
 Attach the DMABUF object identified by the given file descriptor to the
 buffer.

IIO_BUFFER_DMABUF_DETACH_IOCTL(int fd):
 Detach the DMABUF object identified by the given file descriptor from
 the buffer. Note that closing the IIO buffer's file descriptor will
 automatically detach all previously attached DMABUF objects.

IIO_BUFFER_DMABUF_ENQUEUE_IOCTL(struct iio_dmabuf *):
 Request a data transfer to/from the given DMABUF object. Its file
 descriptor, as well as the transfer size and flags are provided in the
 "iio_dmabuf" structure.

These three IOCTLs have to be performed on the IIO buffer's file
descriptor, obtained using the IIO_BUFFER_GET_FD_IOCTL() ioctl.
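
For reference, a minimal userspace sketch of the intended call flow could
look like this (the device path, buffer index, transfer size and the
origin of the DMABUF fd are illustrative placeholders, and all error
handling is elided):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/iio/buffer.h>

	int attach_and_enqueue(int dmabuf_fd)
	{
		int dev_fd = open("/dev/iio:device0", O_RDWR);
		int buf_fd = 0;	/* in: buffer index, out: buffer fd */
		struct iio_dmabuf req = {
			.fd = dmabuf_fd,
			.flags = 0,		/* or IIO_BUFFER_DMABUF_CYCLIC */
			.bytes_used = 4096,	/* usually the DMABUF's size */
		};

		/* The DMABUF ioctls only apply to the per-buffer fd */
		ioctl(dev_fd, IIO_BUFFER_GET_FD_IOCTL, &buf_fd);

		ioctl(buf_fd, IIO_BUFFER_DMABUF_ATTACH_IOCTL, &dmabuf_fd);
		ioctl(buf_fd, IIO_BUFFER_DMABUF_ENQUEUE_IOCTL, &req);

		/* ... wait for the transfer, then optionally detach ... */
		ioctl(buf_fd, IIO_BUFFER_DMABUF_DETACH_IOCTL, &dmabuf_fd);
		return 0;
	}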

Signed-off-by: Paul Cercueil <paul@crapouillou.net>

---
v2: Only allow the new IOCTLs on the buffer FD created with
    IIO_BUFFER_GET_FD_IOCTL().

v3: - Get rid of the old IOCTLs. The IIO subsystem does not create or
    manage DMABUFs anymore, and only attaches/detaches externally
    created DMABUFs.
    - Add IIO_BUFFER_DMABUF_CYCLIC to the supported flags.

v5: - Use dev_err() instead of pr_err()
    - Inline to_iio_dma_fence()
    - Add comment to explain why we unref twice when detaching dmabuf
    - Remove TODO comment. It is actually safe to free the file's
      private data even when transfers are still pending because it
      won't be accessed.
    - Fix documentation of new fields in struct iio_buffer_access_funcs
    - iio_dma_resv_lock() does not need to be exported, make it static
---
 drivers/iio/industrialio-buffer.c | 402 ++++++++++++++++++++++++++++++
 include/linux/iio/buffer_impl.h   |  26 ++
 include/uapi/linux/iio/buffer.h   |  22 ++
 3 files changed, 450 insertions(+)

Comments

Jonathan Cameron Dec. 21, 2023, 12:06 p.m. UTC | #1
On Tue, 19 Dec 2023 18:50:06 +0100
Paul Cercueil <paul@crapouillou.net> wrote:

> Add the necessary infrastructure to the IIO core to support a new
> optional DMABUF based interface.
> 
> With this new interface, DMABUF objects (externally created) can be
> attached to an IIO buffer, and subsequently used for data transfer.
> 
> A userspace application can then use this interface to share DMABUF
> objects between several interfaces, allowing it to transfer data in a
> zero-copy fashion, for instance between IIO and the USB stack.
> 
> The userspace application can also memory-map the DMABUF objects, and
> access the sample data directly. The advantage of doing this vs. the
> read() interface is that it avoids an extra copy of the data between the
> kernel and userspace. This is particularly useful for high-speed
> devices which produce several megabytes or even gigabytes of data per
> second.
> 
> As part of the interface, 3 new IOCTLs have been added:
> 
> IIO_BUFFER_DMABUF_ATTACH_IOCTL(int fd):
>  Attach the DMABUF object identified by the given file descriptor to the
>  buffer.
> 
> IIO_BUFFER_DMABUF_DETACH_IOCTL(int fd):
>  Detach the DMABUF object identified by the given file descriptor from
>  the buffer. Note that closing the IIO buffer's file descriptor will
>  automatically detach all previously attached DMABUF objects.
> 
> IIO_BUFFER_DMABUF_ENQUEUE_IOCTL(struct iio_dmabuf *):
>  Request a data transfer to/from the given DMABUF object. Its file
>  descriptor, as well as the transfer size and flags are provided in the
>  "iio_dmabuf" structure.
> 
> These three IOCTLs have to be performed on the IIO buffer's file
> descriptor, obtained using the IIO_BUFFER_GET_FD_IOCTL() ioctl.
> 

Fair enough - so they don't apply to the 'legacy' buffer, which simplifies
things, but in one place you assume that logic is used (given the error
return values).

> Signed-off-by: Paul Cercueil <paul@crapouillou.net>
> 
This is big and complex and I'm out of time for now, so I've made some
comments but should revisit it.
I'm also looking for review from those more familiar with dmabuf side
of things than I am!

Jonathan


>  
> +static int iio_dma_resv_lock(struct dma_buf *dmabuf, bool nonblock)
> +{
> +	int ret;
> +
> +	ret = dma_resv_lock_interruptible(dmabuf->resv, NULL);
> +	if (ret) {
> +		if (ret != -EDEADLK)
> +			goto out;
> +		if (nonblock) {
> +			ret = -EBUSY;
> +			goto out;
> +		}
> +
> +		ret = dma_resv_lock_slow_interruptible(dmabuf->resv, NULL);
> +	}
> +
> +out:
> +	return ret;

I'm not a fan of gotos that do nothing.  Just return in appropriate places above.
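
(For illustration, the early-return version being suggested is a sketch
along these lines:)

	static int iio_dma_resv_lock(struct dma_buf *dmabuf, bool nonblock)
	{
		int ret;

		ret = dma_resv_lock_interruptible(dmabuf->resv, NULL);
		if (ret != -EDEADLK)
			return ret;
		if (nonblock)
			return -EBUSY;

		return dma_resv_lock_slow_interruptible(dmabuf->resv, NULL);
	}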

> +}
>
> +static int iio_buffer_detach_dmabuf(struct iio_dev_buffer_pair *ib, int *user_req)
> +{
> +	struct dma_buf_attachment *attach;
> +	struct iio_dmabuf_priv *priv;
> +	struct dma_buf *dmabuf;
> +	int dmabuf_fd, ret = 0;
> +
> +	if (copy_from_user(&dmabuf_fd, user_req, sizeof(dmabuf_fd)))
> +		return -EFAULT;
> +
> +	dmabuf = dma_buf_get(dmabuf_fd);
> +	if (IS_ERR(dmabuf))
> +		return PTR_ERR(dmabuf);
> +
> +	attach = iio_buffer_find_attachment(ib->indio_dev, dmabuf);
> +	if (IS_ERR(attach)) {
> +		ret = PTR_ERR(attach);
> +		goto out_dmabuf_put;
> +	}
> +
> +	priv = attach->importer_priv;
> +	list_del_init(&priv->entry);
> +
> +	/*
> +	 * Unref twice to release the reference obtained with
> +	 * iio_buffer_find_attachment() above, and the one obtained in
> +	 * iio_buffer_attach_dmabuf().
> +	 */
> +	iio_buffer_dmabuf_put(attach);
> +	iio_buffer_dmabuf_put(attach);
> +
> +out_dmabuf_put:
> +	dma_buf_put(dmabuf);
As below. Feels like a __free(dma_buf_put) bit of magic would be a nice to have.
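
(A sketch of how that could look in this function, assuming dma_buf_put
gets registered with DEFINE_FREE() as discussed further down this thread:)

	struct dma_buf *dmabuf __free(dma_buf_put) = dma_buf_get(dmabuf_fd);

	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = iio_buffer_find_attachment(ib->indio_dev, dmabuf);
	if (IS_ERR(attach))
		return PTR_ERR(attach);
	/* ... the dmabuf reference is then dropped on every return path ... */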

> +
> +	return ret;
> +}
> +
> +static const char *
> +iio_buffer_dma_fence_get_driver_name(struct dma_fence *fence)
> +{
> +	return "iio";
> +}
> +
> +static void iio_buffer_dma_fence_release(struct dma_fence *fence)
> +{
> +	struct iio_dma_fence *iio_fence =
> +		container_of(fence, struct iio_dma_fence, base);
> +
> +	kfree(iio_fence);
> +}
> +
> +static const struct dma_fence_ops iio_buffer_dma_fence_ops = {
> +	.get_driver_name	= iio_buffer_dma_fence_get_driver_name,
> +	.get_timeline_name	= iio_buffer_dma_fence_get_driver_name,
> +	.release		= iio_buffer_dma_fence_release,
> +};
> +
> +static int iio_buffer_enqueue_dmabuf(struct iio_dev_buffer_pair *ib,
> +				     struct iio_dmabuf __user *iio_dmabuf_req,
> +				     bool nonblock)
> +{
> +	struct iio_dev *indio_dev = ib->indio_dev;
> +	struct iio_buffer *buffer = ib->buffer;
> +	struct iio_dmabuf iio_dmabuf;
> +	struct dma_buf_attachment *attach;
> +	struct iio_dmabuf_priv *priv;
> +	enum dma_data_direction dir;
> +	struct iio_dma_fence *fence;
> +	struct dma_buf *dmabuf;
> +	struct sg_table *sgt;
> +	unsigned long timeout;
> +	bool dma_to_ram;
> +	bool cyclic;
> +	int ret;
> +
> +	if (copy_from_user(&iio_dmabuf, iio_dmabuf_req, sizeof(iio_dmabuf)))
> +		return -EFAULT;
> +
> +	if (iio_dmabuf.flags & ~IIO_BUFFER_DMABUF_SUPPORTED_FLAGS)
> +		return -EINVAL;
> +
> +	cyclic = iio_dmabuf.flags & IIO_BUFFER_DMABUF_CYCLIC;
> +
> +	/* Cyclic flag is only supported on output buffers */
> +	if (cyclic && buffer->direction != IIO_BUFFER_DIRECTION_OUT)
> +		return -EINVAL;
> +
> +	dmabuf = dma_buf_get(iio_dmabuf.fd);
> +	if (IS_ERR(dmabuf))
> +		return PTR_ERR(dmabuf);
> +
> +	if (!iio_dmabuf.bytes_used || iio_dmabuf.bytes_used > dmabuf->size) {
> +		ret = -EINVAL;
> +		goto err_dmabuf_put;
> +	}
> +
> +	attach = iio_buffer_find_attachment(indio_dev, dmabuf);
> +	if (IS_ERR(attach)) {
> +		ret = PTR_ERR(attach);
> +		goto err_dmabuf_put;

Might be worth some cleanup.h magic given this put happens in all exit paths.

> +	}
> +
> +	priv = attach->importer_priv;
> +
> +	dma_to_ram = buffer->direction == IIO_BUFFER_DIRECTION_IN;
> +	dir = dma_to_ram ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
> +
> +	sgt = dma_buf_map_attachment(attach, dir);
> +	if (IS_ERR(sgt)) {
> +		ret = PTR_ERR(sgt);
> +		dev_err(&indio_dev->dev, "Unable to map attachment: %d\n", ret);
> +		goto err_attachment_put;
> +	}
> +
> +	fence = kmalloc(sizeof(*fence), GFP_KERNEL);
> +	if (!fence) {
> +		ret = -ENOMEM;
> +		goto err_unmap_attachment;
> +	}
> +
> +	fence->priv = priv;
> +	fence->sgt = sgt;
> +	fence->dir = dir;
> +	priv->fence = fence;
> +
> +	dma_fence_init(&fence->base, &iio_buffer_dma_fence_ops,
> +		       &priv->lock, priv->context, 0);
> +
> +	ret = iio_dma_resv_lock(dmabuf, nonblock);
> +	if (ret)
> +		goto err_fence_put;
> +
> +	timeout = nonblock ? 0 : msecs_to_jiffies(DMABUF_ENQUEUE_TIMEOUT_MS);
> +
> +	/* Make sure we don't have writers */
> +	ret = (int) dma_resv_wait_timeout(dmabuf->resv, DMA_RESV_USAGE_WRITE,
> +					  true, timeout);

I'd handle this and similar cases as a long rather than adding the odd-looking
cast and making me think too much about it.
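
(i.e. a sketch along these lines, with ret staying an int:)

	long timeout_ret;

	timeout_ret = dma_resv_wait_timeout(dmabuf->resv, DMA_RESV_USAGE_WRITE,
					    true, timeout);
	if (timeout_ret == 0) {
		ret = -EBUSY;
		goto err_resv_unlock;
	}
	if (timeout_ret < 0) {
		ret = timeout_ret;
		goto err_resv_unlock;
	}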

> +	if (ret == 0)
> +		ret = -EBUSY;
> +	if (ret < 0)
> +		goto err_resv_unlock;
> +
> +	if (dma_to_ram) {
> +		/*
> +		 * If we're writing to the DMABUF, make sure we don't have
> +		 * readers
> +		 */
> +		ret = (int) dma_resv_wait_timeout(dmabuf->resv,
> +						  DMA_RESV_USAGE_READ, true,
> +						  timeout);
> +		if (ret == 0)
> +			ret = -EBUSY;
> +		if (ret < 0)
> +			goto err_resv_unlock;
> +	}
> +
> +	ret = dma_resv_reserve_fences(dmabuf->resv, 1);
> +	if (ret)
> +		goto err_resv_unlock;
> +
> +	dma_resv_add_fence(dmabuf->resv, &fence->base,
> +			   dma_resv_usage_rw(dma_to_ram));
> +	dma_resv_unlock(dmabuf->resv);
> +
> +	ret = buffer->access->enqueue_dmabuf(buffer, priv->block, sgt,
> +					     iio_dmabuf.bytes_used, cyclic);
> +	if (ret)
> +		iio_buffer_signal_dmabuf_done(attach, ret);

I'd like a comment on why using the 'successful' path cleanup makes sense in this
error case.  It's possible to figure it out, but reviewers are lazy and generally
we like the cleanup to be obvious and local on error paths.
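
(If the intent is what it appears to be, a comment along these lines might
capture it: once the fence has been added to the reservation object it has
to be signaled, so a failed enqueue is handled like an instantly-completed
transfer:)

	if (ret)
		/*
		 * The fence is already in the resv object and must be
		 * signaled; signaling it also drops the attachment and
		 * sg_table references taken above.
		 */
		iio_buffer_signal_dmabuf_done(attach, ret);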

> +
> +	dma_buf_put(dmabuf);
> +
> +	return ret;
> +
> +err_resv_unlock:
> +	dma_resv_unlock(dmabuf->resv);
> +err_fence_put:
> +	dma_fence_put(&fence->base);
> +err_unmap_attachment:
> +	dma_buf_unmap_attachment(attach, sgt, dir);
> +err_attachment_put:
> +	iio_buffer_dmabuf_put(attach);
> +err_dmabuf_put:
> +	dma_buf_put(dmabuf);
> +
> +	return ret;
> +}
> +
> +void iio_buffer_signal_dmabuf_done(struct dma_buf_attachment *attach, int ret)
> +{
> +	struct iio_dmabuf_priv *priv = attach->importer_priv;
> +	struct iio_dma_fence *fence = priv->fence;
> +	enum dma_data_direction dir = fence->dir;
> +	struct sg_table *sgt = fence->sgt;
> +
> +	dma_fence_get(&fence->base);

I don't know much about dma_fence, but is it valid to access
contents of it (sgt, etc) before getting a reference?
Ultimately dma_fence_put() can result in a kfree_rcu() so seems
unlikely to be safe and definitely fails the 'obviously correct'
test.  Given those are, I assume, trivial accesses, just do them
down here perhaps?
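
(i.e. a reordering sketch along these lines:)

	struct iio_dmabuf_priv *priv = attach->importer_priv;
	struct iio_dma_fence *fence = priv->fence;
	enum dma_data_direction dir;
	struct sg_table *sgt;

	dma_fence_get(&fence->base);
	dir = fence->dir;
	sgt = fence->sgt;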


> +	fence->base.error = ret;
> +	dma_fence_signal(&fence->base);
> +	dma_fence_put(&fence->base);
> +
> +	dma_buf_unmap_attachment(attach, sgt, dir);
> +	iio_buffer_dmabuf_put(attach);
> +}
> +EXPORT_SYMBOL_GPL(iio_buffer_signal_dmabuf_done);
> +

> +static long iio_buffer_chrdev_ioctl(struct file *filp,
> +				    unsigned int cmd, unsigned long arg)
> +{
> +	struct iio_dev_buffer_pair *ib = filp->private_data;
> +	void __user *_arg = (void __user *)arg;
> +
> +	switch (cmd) {
> +	case IIO_BUFFER_DMABUF_ATTACH_IOCTL:
> +		return iio_buffer_attach_dmabuf(ib, _arg);
> +	case IIO_BUFFER_DMABUF_DETACH_IOCTL:
> +		return iio_buffer_detach_dmabuf(ib, _arg);
> +	case IIO_BUFFER_DMABUF_ENQUEUE_IOCTL:
> +		return iio_buffer_enqueue_dmabuf(ib, _arg,
> +						 filp->f_flags & O_NONBLOCK);
> +	default:
> +		return IIO_IOCTL_UNHANDLED;

Given you aren't using the ioctl handling on the legacy buffer, I think this
should simply return an error code, not the magic value we use to indicate
'all fine, but it's not mine'.
Probably -EINVAL or similar.  Note that the wrapper around the legacy buffer
ioctls translates this to -ENODEV rather than returning it from the ioctl.
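
(i.e. simply:)

	default:
		return -EINVAL;
	}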



> +	}
> +}
> +
>  static const struct file_operations iio_buffer_chrdev_fileops = {
>  	.owner = THIS_MODULE,
>  	.llseek = noop_llseek,
>  	.read = iio_buffer_read,
>  	.write = iio_buffer_write,
> +	.unlocked_ioctl = iio_buffer_chrdev_ioctl,
> +	.compat_ioctl = compat_ptr_ioctl,
>  	.poll = iio_buffer_poll,
>  	.release = iio_buffer_chrdev_release,
>  };
Paul Cercueil Dec. 21, 2023, 5:21 p.m. UTC | #2
Hi Jonathan,

On Thursday, December 21, 2023 at 12:06 +0000, Jonathan Cameron wrote:
> On Tue, 19 Dec 2023 18:50:06 +0100
> Paul Cercueil <paul@crapouillou.net> wrote:
> 
> > Add the necessary infrastructure to the IIO core to support a new
> > optional DMABUF based interface.
> > 
> > With this new interface, DMABUF objects (externally created) can be
> > attached to an IIO buffer, and subsequently used for data transfer.
> > 
> > A userspace application can then use this interface to share DMABUF
> > objects between several interfaces, allowing it to transfer data in
> > a
> > zero-copy fashion, for instance between IIO and the USB stack.
> > 
> > The userspace application can also memory-map the DMABUF objects,
> > and
> > access the sample data directly. The advantage of doing this vs.
> > the
> > read() interface is that it avoids an extra copy of the data
> > between the
> > kernel and userspace. This is particularly useful for high-speed
> > devices which produce several megabytes or even gigabytes of data
> > per
> > second.
> > 
> > As part of the interface, 3 new IOCTLs have been added:
> > 
> > IIO_BUFFER_DMABUF_ATTACH_IOCTL(int fd):
> >  Attach the DMABUF object identified by the given file descriptor
> > to the
> >  buffer.
> > 
> > IIO_BUFFER_DMABUF_DETACH_IOCTL(int fd):
> >  Detach the DMABUF object identified by the given file descriptor
> > from
> >  the buffer. Note that closing the IIO buffer's file descriptor
> > will
> >  automatically detach all previously attached DMABUF objects.
> > 
> > IIO_BUFFER_DMABUF_ENQUEUE_IOCTL(struct iio_dmabuf *):
> >  Request a data transfer to/from the given DMABUF object. Its file
> >  descriptor, as well as the transfer size and flags are provided in
> > the
> >  "iio_dmabuf" structure.
> > 
> > These three IOCTLs have to be performed on the IIO buffer's file
> > descriptor, obtained using the IIO_BUFFER_GET_FD_IOCTL() ioctl.
> > 
> 
> Fair enough - so they don't apply to the 'legacy' buffer, which
> simplifies things, but in one place you assume that logic is used
> (given the error return values).
> 
> > Signed-off-by: Paul Cercueil <paul@crapouillou.net>
> > 
> This is big and complex and I'm out of time for now, so I've made
> some
> comments but should revisit it.
> I'm also looking for review from those more familiar with dmabuf side
> of things than I am!
> 
> Jonathan
> 
> 
> >  
> > +static int iio_dma_resv_lock(struct dma_buf *dmabuf, bool nonblock)
> > +{
> > +	int ret;
> > +
> > +	ret = dma_resv_lock_interruptible(dmabuf->resv, NULL);
> > +	if (ret) {
> > +		if (ret != -EDEADLK)
> > +			goto out;
> > +		if (nonblock) {
> > +			ret = -EBUSY;
> > +			goto out;
> > +		}
> > +
> > +		ret = dma_resv_lock_slow_interruptible(dmabuf->resv, NULL);
> > +	}
> > +
> > +out:
> > +	return ret;
> 
> I'm not a fan of gotos that do nothing.  Just return in appropriate
> places above.
> 
> > +}
> > 
> > +static int iio_buffer_detach_dmabuf(struct iio_dev_buffer_pair *ib, int *user_req)
> > +{
> > +	struct dma_buf_attachment *attach;
> > +	struct iio_dmabuf_priv *priv;
> > +	struct dma_buf *dmabuf;
> > +	int dmabuf_fd, ret = 0;
> > +
> > +	if (copy_from_user(&dmabuf_fd, user_req, sizeof(dmabuf_fd)))
> > +		return -EFAULT;
> > +
> > +	dmabuf = dma_buf_get(dmabuf_fd);
> > +	if (IS_ERR(dmabuf))
> > +		return PTR_ERR(dmabuf);
> > +
> > +	attach = iio_buffer_find_attachment(ib->indio_dev, dmabuf);
> > +	if (IS_ERR(attach)) {
> > +		ret = PTR_ERR(attach);
> > +		goto out_dmabuf_put;
> > +	}
> > +
> > +	priv = attach->importer_priv;
> > +	list_del_init(&priv->entry);
> > +
> > +	/*
> > +	 * iio_buffer_find_attachment() above, and the one obtained in
> > +	 * iio_buffer_attach_dmabuf().
> > +	 * iio_buffer_attach_dmabuf().
> > +	 */
> > +	iio_buffer_dmabuf_put(attach);
> > +	iio_buffer_dmabuf_put(attach);
> > +
> > +out_dmabuf_put:
> > +	dma_buf_put(dmabuf);
> As below. Feels like a __free(dma_buf_put) bit of magic would be a
> nice to have.

Whoa, never heard about this. That looks great!

> 
> > +
> > +	return ret;
> > +}
> > +
> > +static const char *
> > +iio_buffer_dma_fence_get_driver_name(struct dma_fence *fence)
> > +{
> > +	return "iio";
> > +}
> > +
> > +static void iio_buffer_dma_fence_release(struct dma_fence *fence)
> > +{
> > +	struct iio_dma_fence *iio_fence =
> > +		container_of(fence, struct iio_dma_fence, base);
> > +
> > +	kfree(iio_fence);
> > +}
> > +
> > +static const struct dma_fence_ops iio_buffer_dma_fence_ops = {
> > +	.get_driver_name	= iio_buffer_dma_fence_get_driver_name,
> > +	.get_timeline_name	= iio_buffer_dma_fence_get_driver_name,
> > +	.release		= iio_buffer_dma_fence_release,
> > +};
> > +
> > +static int iio_buffer_enqueue_dmabuf(struct iio_dev_buffer_pair *ib,
> > +				     struct iio_dmabuf __user *iio_dmabuf_req,
> > +				     bool nonblock)
> > +{
> > +	struct iio_dev *indio_dev = ib->indio_dev;
> > +	struct iio_buffer *buffer = ib->buffer;
> > +	struct iio_dmabuf iio_dmabuf;
> > +	struct dma_buf_attachment *attach;
> > +	struct iio_dmabuf_priv *priv;
> > +	enum dma_data_direction dir;
> > +	struct iio_dma_fence *fence;
> > +	struct dma_buf *dmabuf;
> > +	struct sg_table *sgt;
> > +	unsigned long timeout;
> > +	bool dma_to_ram;
> > +	bool cyclic;
> > +	int ret;
> > +
> > +	if (copy_from_user(&iio_dmabuf, iio_dmabuf_req, sizeof(iio_dmabuf)))
> > +		return -EFAULT;
> > +
> > +	if (iio_dmabuf.flags & ~IIO_BUFFER_DMABUF_SUPPORTED_FLAGS)
> > +		return -EINVAL;
> > +
> > +	cyclic = iio_dmabuf.flags & IIO_BUFFER_DMABUF_CYCLIC;
> > +
> > +	/* Cyclic flag is only supported on output buffers */
> > +	if (cyclic && buffer->direction != IIO_BUFFER_DIRECTION_OUT)
> > +		return -EINVAL;
> > +
> > +	dmabuf = dma_buf_get(iio_dmabuf.fd);
> > +	if (IS_ERR(dmabuf))
> > +		return PTR_ERR(dmabuf);
> > +
> > +	if (!iio_dmabuf.bytes_used || iio_dmabuf.bytes_used > dmabuf->size) {
> > +		ret = -EINVAL;
> > +		goto err_dmabuf_put;
> > +	}
> > +
> > +	attach = iio_buffer_find_attachment(indio_dev, dmabuf);
> > +	if (IS_ERR(attach)) {
> > +		ret = PTR_ERR(attach);
> > +		goto err_dmabuf_put;
> 
> Might be worth some cleanup.h magic given this put happens in all
> exit paths.
> 
> > +	}
> > +
> > +	priv = attach->importer_priv;
> > +
> > +	dma_to_ram = buffer->direction == IIO_BUFFER_DIRECTION_IN;
> > +	dir = dma_to_ram ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
> > +
> > +	sgt = dma_buf_map_attachment(attach, dir);
> > +	if (IS_ERR(sgt)) {
> > +		ret = PTR_ERR(sgt);
> > +		dev_err(&indio_dev->dev, "Unable to map attachment: %d\n", ret);
> > +		goto err_attachment_put;
> > +	}
> > +
> > +	fence = kmalloc(sizeof(*fence), GFP_KERNEL);
> > +	if (!fence) {
> > +		ret = -ENOMEM;
> > +		goto err_unmap_attachment;
> > +	}
> > +
> > +	fence->priv = priv;
> > +	fence->sgt = sgt;
> > +	fence->dir = dir;
> > +	priv->fence = fence;
> > +
> > +	dma_fence_init(&fence->base, &iio_buffer_dma_fence_ops,
> > +		       &priv->lock, priv->context, 0);
> > +
> > +	ret = iio_dma_resv_lock(dmabuf, nonblock);
> > +	if (ret)
> > +		goto err_fence_put;
> > +
> > +	timeout = nonblock ? 0 : msecs_to_jiffies(DMABUF_ENQUEUE_TIMEOUT_MS);
> > +
> > +	/* Make sure we don't have writers */
> > +	ret = (int) dma_resv_wait_timeout(dmabuf->resv, DMA_RESV_USAGE_WRITE,
> > +					  true, timeout);
> 
> I'd handle this and similar cases as a long rather than adding the
> odd-looking cast and making me think too much about it.
> 
> > +	if (ret == 0)
> > +		ret = -EBUSY;
> > +	if (ret < 0)
> > +		goto err_resv_unlock;
> > +
> > +	if (dma_to_ram) {
> > +		/*
> > +		 * If we're writing to the DMABUF, make sure we
> > don't have
> > +		 * readers
> > +		 */
> > +		ret = (int) dma_resv_wait_timeout(dmabuf->resv,
> > +						  DMA_RESV_USAGE_READ, true,
> > +						  timeout);
> > +		if (ret == 0)
> > +			ret = -EBUSY;
> > +		if (ret < 0)
> > +			goto err_resv_unlock;
> > +	}
> > +
> > +	ret = dma_resv_reserve_fences(dmabuf->resv, 1);
> > +	if (ret)
> > +		goto err_resv_unlock;
> > +
> > +	dma_resv_add_fence(dmabuf->resv, &fence->base,
> > +			   dma_resv_usage_rw(dma_to_ram));
> > +	dma_resv_unlock(dmabuf->resv);
> > +
> > +	ret = buffer->access->enqueue_dmabuf(buffer, priv->block, sgt,
> > +					     iio_dmabuf.bytes_used, cyclic);
> > +	if (ret)
> > +		iio_buffer_signal_dmabuf_done(attach, ret);
> 
> I'd like a comment on why using the 'successful' path cleanup makes
> sense in this
> error case.  It's possible to figure it out, but reviewers are lazy
> and generally
> we like the cleanup to be obvious and local on error paths.
> 
> > +
> > +	dma_buf_put(dmabuf);
> > +
> > +	return ret;
> > +
> > +err_resv_unlock:
> > +	dma_resv_unlock(dmabuf->resv);
> > +err_fence_put:
> > +	dma_fence_put(&fence->base);
> > +err_unmap_attachment:
> > +	dma_buf_unmap_attachment(attach, sgt, dir);
> > +err_attachment_put:
> > +	iio_buffer_dmabuf_put(attach);
> > +err_dmabuf_put:
> > +	dma_buf_put(dmabuf);
> > +
> > +	return ret;
> > +}
> > +
> > +void iio_buffer_signal_dmabuf_done(struct dma_buf_attachment *attach, int ret)
> > +{
> > +	struct iio_dmabuf_priv *priv = attach->importer_priv;
> > +	struct iio_dma_fence *fence = priv->fence;
> > +	enum dma_data_direction dir = fence->dir;
> > +	struct sg_table *sgt = fence->sgt;
> > +
> > +	dma_fence_get(&fence->base);
> 
> I don't know much about dma_fence, but is it valid to access
> contents of it (sgt, etc) before getting a reference?
> Ultimately dma_fence_put() can result in a kfree_rcu() so seems
> unlikely to be safe and definitely fails the 'obviously correct'
> test.  Given those are, I assume, trivial accesses, just do them
> down here perhaps?

It is valid to access the fence before getting a reference - the fence
won't be freed before it is signaled down below. It would be illegal to
access it after the dma_fence_put() though, which I'm not doing here.

> 
> 
> > +	fence->base.error = ret;
> > +	dma_fence_signal(&fence->base);
> > +	dma_fence_put(&fence->base);
> > +
> > +	dma_buf_unmap_attachment(attach, sgt, dir);
> > +	iio_buffer_dmabuf_put(attach);
> > +}
> > +EXPORT_SYMBOL_GPL(iio_buffer_signal_dmabuf_done);
> > +
> 
> > +static long iio_buffer_chrdev_ioctl(struct file *filp,
> > +				    unsigned int cmd, unsigned long arg)
> > +{
> > +	struct iio_dev_buffer_pair *ib = filp->private_data;
> > +	void __user *_arg = (void __user *)arg;
> > +
> > +	switch (cmd) {
> > +	case IIO_BUFFER_DMABUF_ATTACH_IOCTL:
> > +		return iio_buffer_attach_dmabuf(ib, _arg);
> > +	case IIO_BUFFER_DMABUF_DETACH_IOCTL:
> > +		return iio_buffer_detach_dmabuf(ib, _arg);
> > +	case IIO_BUFFER_DMABUF_ENQUEUE_IOCTL:
> > +		return iio_buffer_enqueue_dmabuf(ib, _arg,
> > +						 filp->f_flags & O_NONBLOCK);
> > +	default:
> > +		return IIO_IOCTL_UNHANDLED;
> 
> Given you aren't using the ioctl handling on the legacy buffer, I
> think this
> should simply return an error code, not the magic value we use to
> indicate
> 'all fine, but it's not mine'.
> Probably -EINVAL or similar.  Note that the wrapper around the legacy
> buffer ioctls translates this to -ENODEV rather than returning it from
> the ioctl.

ACK for this and the other comments.

Thanks for reviewing!

Cheers,
-Paul

> > +	}
> > +}
> > +
> >  static const struct file_operations iio_buffer_chrdev_fileops = {
> >  	.owner = THIS_MODULE,
> >  	.llseek = noop_llseek,
> >  	.read = iio_buffer_read,
> >  	.write = iio_buffer_write,
> > +	.unlocked_ioctl = iio_buffer_chrdev_ioctl,
> > +	.compat_ioctl = compat_ptr_ioctl,
> >  	.poll = iio_buffer_poll,
> >  	.release = iio_buffer_chrdev_release,
> >  };
>
Jonathan Cameron Jan. 27, 2024, 4:50 p.m. UTC | #3
> > > +	iio_buffer_dmabuf_put(attach);
> > > +
> > > +out_dmabuf_put:
> > > +	dma_buf_put(dmabuf);  
> > As below. Feels like a __free(dma_buf_put) bit of magic would be a
> > nice to have.  
> 
> I'm working on the patches right now, just one quick question.
> 
> Having a __free(dma_buf_put) requires that dma_buf_put is first
> "registered" as a freeing function using DEFINE_FREE() in <linux/dma-
> buf.h>, which has not been done yet.  
> 
> That would mean carrying a dma-buf specific patch in your tree, are you
> OK with that?
Needs an ACK from appropriate maintainer, but otherwise I'm fine doing
so.  Alternative is to circle back to this later after this code is upstream.
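
(For context, the registration in question would presumably be a one-line
DEFINE_FREE() in <linux/dma-buf.h>, something like:)

	DEFINE_FREE(dma_buf_put, struct dma_buf *,
		    if (!IS_ERR_OR_NULL(_T)) dma_buf_put(_T))

(after which importers can write "struct dma_buf *dmabuf
__free(dma_buf_put) = dma_buf_get(fd);" and drop the manual
dma_buf_put() calls on each exit path.)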

> 
> Cheers,
> -Paul

>
Paul Cercueil Jan. 29, 2024, 1:06 p.m. UTC | #4
Hi Christian,

On Monday, January 29, 2024 at 13:52 +0100, Christian König wrote:
> On 27.01.24 at 17:50, Jonathan Cameron wrote:
> > > > > +	iio_buffer_dmabuf_put(attach);
> > > > > +
> > > > > +out_dmabuf_put:
> > > > > +	dma_buf_put(dmabuf);
> > > > As below. Feels like a __free(dma_buf_put) bit of magic would
> > > > be a
> > > > nice to have.
> > > I'm working on the patches right now, just one quick question.
> > > 
> > > Having a __free(dma_buf_put) requires that dma_buf_put is first
> > > "registered" as a freeing function using DEFINE_FREE() in
> > > <linux/dma-
> > > buf.h>, which has not been done yet.
> > > 
> > > That would mean carrying a dma-buf specific patch in your tree,
> > > are you
> > > OK with that?
> > Needs an ACK from appropriate maintainer, but otherwise I'm fine
> > doing
> > so.  Alternative is to circle back to this later after this code is
> > upstream.
> 
> Separate patches for that please, the autocleanup feature is so new
> that I'm not 100% convinced that everything works out smoothly from
> the start.

Separate patches is a given, did you mean outside this patchset?
Because I can send a separate patchset that introduces scope-based
management for dma_fence and dma_buf, but then it won't have users.

Cheers,
-Paul
Christian König Jan. 29, 2024, 1:17 p.m. UTC | #5
On 29.01.24 at 14:06, Paul Cercueil wrote:
> Hi Christian,
>
> On Monday, January 29, 2024 at 13:52 +0100, Christian König wrote:
>> On 27.01.24 at 17:50, Jonathan Cameron wrote:
>>>>>> +	iio_buffer_dmabuf_put(attach);
>>>>>> +
>>>>>> +out_dmabuf_put:
>>>>>> +	dma_buf_put(dmabuf);
>>>>> As below. Feels like a __free(dma_buf_put) bit of magic would
>>>>> be a
>>>>> nice to have.
>>>> I'm working on the patches right now, just one quick question.
>>>>
>>>> Having a __free(dma_buf_put) requires that dma_buf_put is first
>>>> "registered" as a freeing function using DEFINE_FREE() in
>>>> <linux/dma-buf.h>, which has not been done yet.
>>>>
>>>> That would mean carrying a dma-buf specific patch in your tree,
>>>> are you
>>>> OK with that?
>>> Needs an ACK from appropriate maintainer, but otherwise I'm fine
>>> doing
>>> so.  Alternative is to circle back to this later after this code is
>>> upstream.
>> Separate patches for that please, the autocleanup feature is so new
>> that
>> I'm not 100% convinced that everything works out smoothly from the
>> start.
> Separate patches is a given, did you mean outside this patchset?
> Because I can send a separate patchset that introduces scope-based
> management for dma_fence and dma_buf, but then it won't have users.

Outside of the patchset, this is essentially brand new stuff.

IIRC we have quite a number of dma_fence selftests and sw_sync which is 
basically code inside the drivers/dma-buf directory only there for 
testing DMA-buf functionality.

Convert those over as well and I'm more than happy to upstream this change.

Thanks,
Christian.

>
> Cheers,
> -Paul
Paul Cercueil Jan. 29, 2024, 2:15 p.m. UTC | #6
On Monday, January 29, 2024 at 14:32 +0100, Paul Cercueil wrote:
> On Monday, January 29, 2024 at 14:17 +0100, Christian König wrote:
> > On 29.01.24 at 14:06, Paul Cercueil wrote:
> > > Hi Christian,
> > >
> > > On Monday, January 29, 2024 at 13:52 +0100, Christian König wrote:
> > > > On 27.01.24 at 17:50, Jonathan Cameron wrote:
> > > > > > > > +	iio_buffer_dmabuf_put(attach);
> > > > > > > > +
> > > > > > > > +out_dmabuf_put:
> > > > > > > > +	dma_buf_put(dmabuf);
> > > > > > > As below. Feels like a __free(dma_buf_put) bit of magic
> > > > > > > would
> > > > > > > be a
> > > > > > > nice to have.
> > > > > > I'm working on the patches right now, just one quick
> > > > > > question.
> > > > > > 
> > > > > > Having a __free(dma_buf_put) requires that dma_buf_put is
> > > > > > first
> > > > > > "registered" as a freeing function using DEFINE_FREE() in
> > > > > > <linux/dma-buf.h>, which has not been done yet.
> > > > > > 
> > > > > > That would mean carrying a dma-buf specific patch in your
> > > > > > tree,
> > > > > > are you
> > > > > > OK with that?
> > > > > Needs an ACK from appropriate maintainer, but otherwise I'm
> > > > > fine
> > > > > doing
> > > > > so.  Alternative is to circle back to this later after this
> > > > > code is
> > > > > upstream.
> > > > Separate patches for that please, the autocleanup feature is so
> > > > new
> > > > that
> > > > I'm not 100% convinced that everything works out smoothly from
> > > > the
> > > > start.
> > > Separate patches is a given, did you mean outside this patchset?
> > > Because I can send a separate patchset that introduces scope-
> > > based
> > > management for dma_fence and dma_buf, but then it won't have
> > > users.
> > 
> > Outside of the patchset, this is essentially brand new stuff.
> > 
> > IIRC we have quite a number of dma_fence selftests and sw_sync
> > which
> > is 
> > basically code inside the drivers/dma-buf directory only there for 
> > testing DMA-buf functionality.
> > 
> > Convert those over as well and I'm more than happy to upstream this
> > change.
> 
> Well there is very little to convert there; you can use scope-based
> management when the unref is done in all exit points of the functional
> block, and the only place I could find that does that in
> drivers/dma-buf/ was in dma_fence_chain_enable_signaling() in
> dma-fence-chain.c.

Actually - not even that, since it doesn't call dma_fence_get() and
dma_fence_put() on the same fence.

So I cannot use it anywhere in drivers/dma-buf/.

-Paul

Patch

diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 09c41e9ccf87..24c040e073a7 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -13,10 +13,14 @@ 
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-resv.h>
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/cdev.h>
 #include <linux/slab.h>
+#include <linux/mm.h>
 #include <linux/poll.h>
 #include <linux/sched/signal.h>
 
@@ -28,6 +32,31 @@ 
 #include <linux/iio/buffer.h>
 #include <linux/iio/buffer_impl.h>
 
+#define DMABUF_ENQUEUE_TIMEOUT_MS 5000
+
+struct iio_dma_fence;
+
+struct iio_dmabuf_priv {
+	struct list_head entry;
+	struct kref ref;
+
+	struct iio_buffer *buffer;
+	struct iio_dma_buffer_block *block;
+
+	u64 context;
+	spinlock_t lock;
+
+	struct dma_buf_attachment *attach;
+	struct iio_dma_fence *fence;
+};
+
+struct iio_dma_fence {
+	struct dma_fence base;
+	struct iio_dmabuf_priv *priv;
+	struct sg_table *sgt;
+	enum dma_data_direction dir;
+};
+
 static const char * const iio_endian_prefix[] = {
 	[IIO_BE] = "be",
 	[IIO_LE] = "le",
@@ -332,6 +361,7 @@  void iio_buffer_init(struct iio_buffer *buffer)
 {
 	INIT_LIST_HEAD(&buffer->demux_list);
 	INIT_LIST_HEAD(&buffer->buffer_list);
+	INIT_LIST_HEAD(&buffer->dmabufs);
 	init_waitqueue_head(&buffer->pollq);
 	kref_init(&buffer->ref);
 	if (!buffer->watermark)
@@ -1519,14 +1549,54 @@  static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev)
 	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
 }
 
+static void iio_buffer_dmabuf_release(struct kref *ref)
+{
+	struct iio_dmabuf_priv *priv = container_of(ref, struct iio_dmabuf_priv, ref);
+	struct dma_buf_attachment *attach = priv->attach;
+	struct iio_buffer *buffer = priv->buffer;
+	struct dma_buf *dmabuf = attach->dmabuf;
+
+	buffer->access->detach_dmabuf(buffer, priv->block);
+
+	dma_buf_detach(attach->dmabuf, attach);
+	dma_buf_put(dmabuf);
+	kfree(priv);
+}
+
+void iio_buffer_dmabuf_get(struct dma_buf_attachment *attach)
+{
+	struct iio_dmabuf_priv *priv = attach->importer_priv;
+
+	kref_get(&priv->ref);
+}
+EXPORT_SYMBOL_GPL(iio_buffer_dmabuf_get);
+
+void iio_buffer_dmabuf_put(struct dma_buf_attachment *attach)
+{
+	struct iio_dmabuf_priv *priv = attach->importer_priv;
+
+	kref_put(&priv->ref, iio_buffer_dmabuf_release);
+}
+EXPORT_SYMBOL_GPL(iio_buffer_dmabuf_put);
+
 static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
 {
 	struct iio_dev_buffer_pair *ib = filep->private_data;
 	struct iio_dev *indio_dev = ib->indio_dev;
 	struct iio_buffer *buffer = ib->buffer;
+	struct iio_dmabuf_priv *priv, *tmp;
 
 	wake_up(&buffer->pollq);
 
+	/* Close all attached DMABUFs */
+	list_for_each_entry_safe(priv, tmp, &buffer->dmabufs, entry) {
+		list_del_init(&priv->entry);
+		iio_buffer_dmabuf_put(priv->attach);
+	}
+
+	if (!list_empty(&buffer->dmabufs))
+		dev_warn(&indio_dev->dev, "Buffer FD closed with active transfers\n");
+
 	kfree(ib);
 	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
 	iio_device_put(indio_dev);
@@ -1534,11 +1604,343 @@  static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
 	return 0;
 }
 
+static int iio_dma_resv_lock(struct dma_buf *dmabuf, bool nonblock)
+{
+	int ret;
+
+	ret = dma_resv_lock_interruptible(dmabuf->resv, NULL);
+	if (ret) {
+		if (ret != -EDEADLK)
+			goto out;
+		if (nonblock) {
+			ret = -EBUSY;
+			goto out;
+		}
+
+		ret = dma_resv_lock_slow_interruptible(dmabuf->resv, NULL);
+	}
+
+out:
+	return ret;
+}
+
+static struct dma_buf_attachment *
+iio_buffer_find_attachment(struct iio_dev *indio_dev, struct dma_buf *dmabuf)
+{
+	struct dma_buf_attachment *elm, *attach = NULL;
+	int ret;
+
+	ret = iio_dma_resv_lock(dmabuf, false);
+	if (ret)
+		return ERR_PTR(ret);
+
+	list_for_each_entry(elm, &dmabuf->attachments, node) {
+		if (elm->dev == indio_dev->dev.parent) {
+			attach = elm;
+			break;
+		}
+	}
+
+	if (attach)
+		iio_buffer_dmabuf_get(elm);
+
+	dma_resv_unlock(dmabuf->resv);
+
+	return attach ?: ERR_PTR(-EPERM);
+}
+
+static int iio_buffer_attach_dmabuf(struct iio_dev_buffer_pair *ib,
+				    int __user *user_fd)
+{
+	struct iio_dev *indio_dev = ib->indio_dev;
+	struct iio_buffer *buffer = ib->buffer;
+	struct dma_buf_attachment *attach;
+	struct iio_dmabuf_priv *priv;
+	struct dma_buf *dmabuf;
+	int err, fd;
+
+	if (!buffer->access->attach_dmabuf
+	    || !buffer->access->detach_dmabuf
+	    || !buffer->access->enqueue_dmabuf)
+		return -EPERM;
+
+	if (copy_from_user(&fd, user_fd, sizeof(fd)))
+		return -EFAULT;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	spin_lock_init(&priv->lock);
+	priv->context = dma_fence_context_alloc(1);
+
+	dmabuf = dma_buf_get(fd);
+	if (IS_ERR(dmabuf)) {
+		err = PTR_ERR(dmabuf);
+		goto err_free_priv;
+	}
+
+	attach = dma_buf_attach(dmabuf, indio_dev->dev.parent);
+	if (IS_ERR(attach)) {
+		err = PTR_ERR(attach);
+		goto err_dmabuf_put;
+	}
+
+	kref_init(&priv->ref);
+	priv->buffer = buffer;
+	priv->attach = attach;
+	attach->importer_priv = priv;
+
+	priv->block = buffer->access->attach_dmabuf(buffer, attach);
+	if (IS_ERR(priv->block)) {
+		err = PTR_ERR(priv->block);
+		goto err_dmabuf_detach;
+	}
+
+	list_add(&priv->entry, &buffer->dmabufs);
+
+	return 0;
+
+err_dmabuf_detach:
+	dma_buf_detach(dmabuf, attach);
+err_dmabuf_put:
+	dma_buf_put(dmabuf);
+err_free_priv:
+	kfree(priv);
+
+	return err;
+}
+
+static int iio_buffer_detach_dmabuf(struct iio_dev_buffer_pair *ib, int *user_req)
+{
+	struct dma_buf_attachment *attach;
+	struct iio_dmabuf_priv *priv;
+	struct dma_buf *dmabuf;
+	int dmabuf_fd, ret = 0;
+
+	if (copy_from_user(&dmabuf_fd, user_req, sizeof(dmabuf_fd)))
+		return -EFAULT;
+
+	dmabuf = dma_buf_get(dmabuf_fd);
+	if (IS_ERR(dmabuf))
+		return PTR_ERR(dmabuf);
+
+	attach = iio_buffer_find_attachment(ib->indio_dev, dmabuf);
+	if (IS_ERR(attach)) {
+		ret = PTR_ERR(attach);
+		goto out_dmabuf_put;
+	}
+
+	priv = attach->importer_priv;
+	list_del_init(&priv->entry);
+
+	/*
+	 * Unref twice to release the reference obtained with
+	 * iio_buffer_find_attachment() above, and the one obtained in
+	 * iio_buffer_attach_dmabuf().
+	 */
+	iio_buffer_dmabuf_put(attach);
+	iio_buffer_dmabuf_put(attach);
+
+out_dmabuf_put:
+	dma_buf_put(dmabuf);
+
+	return ret;
+}
+
+static const char *
+iio_buffer_dma_fence_get_driver_name(struct dma_fence *fence)
+{
+	return "iio";
+}
+
+static void iio_buffer_dma_fence_release(struct dma_fence *fence)
+{
+	struct iio_dma_fence *iio_fence =
+		container_of(fence, struct iio_dma_fence, base);
+
+	kfree(iio_fence);
+}
+
+static const struct dma_fence_ops iio_buffer_dma_fence_ops = {
+	.get_driver_name	= iio_buffer_dma_fence_get_driver_name,
+	.get_timeline_name	= iio_buffer_dma_fence_get_driver_name,
+	.release		= iio_buffer_dma_fence_release,
+};
+
+static int iio_buffer_enqueue_dmabuf(struct iio_dev_buffer_pair *ib,
+				     struct iio_dmabuf __user *iio_dmabuf_req,
+				     bool nonblock)
+{
+	struct iio_dev *indio_dev = ib->indio_dev;
+	struct iio_buffer *buffer = ib->buffer;
+	struct iio_dmabuf iio_dmabuf;
+	struct dma_buf_attachment *attach;
+	struct iio_dmabuf_priv *priv;
+	enum dma_data_direction dir;
+	struct iio_dma_fence *fence;
+	struct dma_buf *dmabuf;
+	struct sg_table *sgt;
+	unsigned long timeout;
+	bool dma_to_ram;
+	bool cyclic;
+	int ret;
+
+	if (copy_from_user(&iio_dmabuf, iio_dmabuf_req, sizeof(iio_dmabuf)))
+		return -EFAULT;
+
+	if (iio_dmabuf.flags & ~IIO_BUFFER_DMABUF_SUPPORTED_FLAGS)
+		return -EINVAL;
+
+	cyclic = iio_dmabuf.flags & IIO_BUFFER_DMABUF_CYCLIC;
+
+	/* Cyclic flag is only supported on output buffers */
+	if (cyclic && buffer->direction != IIO_BUFFER_DIRECTION_OUT)
+		return -EINVAL;
+
+	dmabuf = dma_buf_get(iio_dmabuf.fd);
+	if (IS_ERR(dmabuf))
+		return PTR_ERR(dmabuf);
+
+	if (!iio_dmabuf.bytes_used || iio_dmabuf.bytes_used > dmabuf->size) {
+		ret = -EINVAL;
+		goto err_dmabuf_put;
+	}
+
+	attach = iio_buffer_find_attachment(indio_dev, dmabuf);
+	if (IS_ERR(attach)) {
+		ret = PTR_ERR(attach);
+		goto err_dmabuf_put;
+	}
+
+	priv = attach->importer_priv;
+
+	dma_to_ram = buffer->direction == IIO_BUFFER_DIRECTION_IN;
+	dir = dma_to_ram ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+	sgt = dma_buf_map_attachment(attach, dir);
+	if (IS_ERR(sgt)) {
+		ret = PTR_ERR(sgt);
+		dev_err(&indio_dev->dev, "Unable to map attachment: %d\n", ret);
+		goto err_attachment_put;
+	}
+
+	fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+	if (!fence) {
+		ret = -ENOMEM;
+		goto err_unmap_attachment;
+	}
+
+	fence->priv = priv;
+	fence->sgt = sgt;
+	fence->dir = dir;
+	priv->fence = fence;
+
+	dma_fence_init(&fence->base, &iio_buffer_dma_fence_ops,
+		       &priv->lock, priv->context, 0);
+
+	ret = iio_dma_resv_lock(dmabuf, nonblock);
+	if (ret)
+		goto err_fence_put;
+
+	timeout = nonblock ? 0 : msecs_to_jiffies(DMABUF_ENQUEUE_TIMEOUT_MS);
+
+	/* Make sure we don't have writers */
+	ret = (int) dma_resv_wait_timeout(dmabuf->resv, DMA_RESV_USAGE_WRITE,
+					  true, timeout);
+	if (ret == 0)
+		ret = -EBUSY;
+	if (ret < 0)
+		goto err_resv_unlock;
+
+	if (dma_to_ram) {
+		/*
+		 * If we're writing to the DMABUF, make sure we don't have
+		 * readers
+		 */
+		ret = (int) dma_resv_wait_timeout(dmabuf->resv,
+						  DMA_RESV_USAGE_READ, true,
+						  timeout);
+		if (ret == 0)
+			ret = -EBUSY;
+		if (ret < 0)
+			goto err_resv_unlock;
+	}
+
+	ret = dma_resv_reserve_fences(dmabuf->resv, 1);
+	if (ret)
+		goto err_resv_unlock;
+
+	dma_resv_add_fence(dmabuf->resv, &fence->base,
+			   dma_resv_usage_rw(dma_to_ram));
+	dma_resv_unlock(dmabuf->resv);
+
+	ret = buffer->access->enqueue_dmabuf(buffer, priv->block, sgt,
+					     iio_dmabuf.bytes_used, cyclic);
+	if (ret)
+		iio_buffer_signal_dmabuf_done(attach, ret);
+
+	dma_buf_put(dmabuf);
+
+	return ret;
+
+err_resv_unlock:
+	dma_resv_unlock(dmabuf->resv);
+err_fence_put:
+	dma_fence_put(&fence->base);
+err_unmap_attachment:
+	dma_buf_unmap_attachment(attach, sgt, dir);
+err_attachment_put:
+	iio_buffer_dmabuf_put(attach);
+err_dmabuf_put:
+	dma_buf_put(dmabuf);
+
+	return ret;
+}
+
+void iio_buffer_signal_dmabuf_done(struct dma_buf_attachment *attach, int ret)
+{
+	struct iio_dmabuf_priv *priv = attach->importer_priv;
+	struct iio_dma_fence *fence = priv->fence;
+	enum dma_data_direction dir = fence->dir;
+	struct sg_table *sgt = fence->sgt;
+
+	dma_fence_get(&fence->base);
+	fence->base.error = ret;
+	dma_fence_signal(&fence->base);
+	dma_fence_put(&fence->base);
+
+	dma_buf_unmap_attachment(attach, sgt, dir);
+	iio_buffer_dmabuf_put(attach);
+}
+EXPORT_SYMBOL_GPL(iio_buffer_signal_dmabuf_done);
+
+static long iio_buffer_chrdev_ioctl(struct file *filp,
+				    unsigned int cmd, unsigned long arg)
+{
+	struct iio_dev_buffer_pair *ib = filp->private_data;
+	void __user *_arg = (void __user *)arg;
+
+	switch (cmd) {
+	case IIO_BUFFER_DMABUF_ATTACH_IOCTL:
+		return iio_buffer_attach_dmabuf(ib, _arg);
+	case IIO_BUFFER_DMABUF_DETACH_IOCTL:
+		return iio_buffer_detach_dmabuf(ib, _arg);
+	case IIO_BUFFER_DMABUF_ENQUEUE_IOCTL:
+		return iio_buffer_enqueue_dmabuf(ib, _arg,
+						 filp->f_flags & O_NONBLOCK);
+	default:
+		return IIO_IOCTL_UNHANDLED;
+	}
+}
+
 static const struct file_operations iio_buffer_chrdev_fileops = {
 	.owner = THIS_MODULE,
 	.llseek = noop_llseek,
 	.read = iio_buffer_read,
 	.write = iio_buffer_write,
+	.unlocked_ioctl = iio_buffer_chrdev_ioctl,
+	.compat_ioctl = compat_ptr_ioctl,
 	.poll = iio_buffer_poll,
 	.release = iio_buffer_chrdev_release,
 };
diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
index 89c3fd7c29ca..55d93705c96b 100644
--- a/include/linux/iio/buffer_impl.h
+++ b/include/linux/iio/buffer_impl.h
@@ -9,8 +9,11 @@ 
 #include <uapi/linux/iio/buffer.h>
 #include <linux/iio/buffer.h>
 
+struct dma_buf_attachment;
 struct iio_dev;
+struct iio_dma_buffer_block;
 struct iio_buffer;
+struct sg_table;
 
 /**
  * INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be
@@ -39,6 +42,13 @@  struct iio_buffer;
  *                      device stops sampling. Calles are balanced with @enable.
  * @release:		called when the last reference to the buffer is dropped,
  *			should free all resources allocated by the buffer.
+ * @attach_dmabuf:	called from userspace via ioctl to attach one external
+ *			DMABUF.
+ * @detach_dmabuf:	called from userspace via ioctl to detach one previously
+ *			attached DMABUF.
+ * @enqueue_dmabuf:	called from userspace via ioctl to queue this DMABUF
+ *			object to this buffer. Requires a valid DMABUF fd that
+ *			was previously attached to this buffer.
  * @modes:		Supported operating modes by this buffer type
  * @flags:		A bitmask combination of INDIO_BUFFER_FLAG_*
  *
@@ -68,6 +78,14 @@  struct iio_buffer_access_funcs {
 
 	void (*release)(struct iio_buffer *buffer);
 
+	struct iio_dma_buffer_block * (*attach_dmabuf)(struct iio_buffer *buffer,
+						       struct dma_buf_attachment *attach);
+	void (*detach_dmabuf)(struct iio_buffer *buffer,
+			      struct iio_dma_buffer_block *block);
+	int (*enqueue_dmabuf)(struct iio_buffer *buffer,
+			      struct iio_dma_buffer_block *block,
+			      struct sg_table *sgt, size_t size, bool cyclic);
+
 	unsigned int modes;
 	unsigned int flags;
 };
@@ -136,6 +154,9 @@  struct iio_buffer {
 
 	/* @ref: Reference count of the buffer. */
 	struct kref ref;
+
+	/* @dmabufs: List of DMABUF attachments */
+	struct list_head dmabufs;
 };
 
 /**
@@ -156,9 +177,14 @@  int iio_update_buffers(struct iio_dev *indio_dev,
  **/
 void iio_buffer_init(struct iio_buffer *buffer);
 
+void iio_buffer_dmabuf_get(struct dma_buf_attachment *attach);
+void iio_buffer_dmabuf_put(struct dma_buf_attachment *attach);
+
 struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer);
 void iio_buffer_put(struct iio_buffer *buffer);
 
+void iio_buffer_signal_dmabuf_done(struct dma_buf_attachment *attach, int ret);
+
 #else /* CONFIG_IIO_BUFFER */
 
 static inline void iio_buffer_get(struct iio_buffer *buffer) {}
diff --git a/include/uapi/linux/iio/buffer.h b/include/uapi/linux/iio/buffer.h
index 13939032b3f6..c666aa95e532 100644
--- a/include/uapi/linux/iio/buffer.h
+++ b/include/uapi/linux/iio/buffer.h
@@ -5,6 +5,28 @@ 
 #ifndef _UAPI_IIO_BUFFER_H_
 #define _UAPI_IIO_BUFFER_H_
 
+#include <linux/types.h>
+
+/* Flags for iio_dmabuf.flags */
+#define IIO_BUFFER_DMABUF_CYCLIC		(1 << 0)
+#define IIO_BUFFER_DMABUF_SUPPORTED_FLAGS	0x00000001
+
+/**
+ * struct iio_dmabuf - Descriptor for a single IIO DMABUF object
+ * @fd:		file descriptor of the DMABUF object
+ * @flags:	one or more IIO_BUFFER_DMABUF_* flags
+ * @bytes_used:	number of bytes used in this DMABUF for the data transfer.
+ *		Should generally be set to the DMABUF's size.
+ */
+struct iio_dmabuf {
+	__u32 fd;
+	__u32 flags;
+	__u64 bytes_used;
+};
+
 #define IIO_BUFFER_GET_FD_IOCTL			_IOWR('i', 0x91, int)
+#define IIO_BUFFER_DMABUF_ATTACH_IOCTL		_IOW('i', 0x92, int)
+#define IIO_BUFFER_DMABUF_DETACH_IOCTL		_IOW('i', 0x93, int)
+#define IIO_BUFFER_DMABUF_ENQUEUE_IOCTL		_IOW('i', 0x94, struct iio_dmabuf)
 
 #endif /* _UAPI_IIO_BUFFER_H_ */