@@ -1097,6 +1097,11 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
return data[0];
}

+static inline struct device *stream_to_dmadev(struct uvc_streaming *stream)
+{
+ return stream->dev->udev->bus->controller->parent;
+}
+
/*
* uvc_video_copy_data_work: Asynchronous memcpy processing
*
@@ -1118,6 +1123,9 @@ static void uvc_video_copy_data_work(struct work_struct *work)
uvc_queue_buffer_release(op->buf);
}
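
+ /* Return buffer ownership to the device before resubmitting the URB. */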
+ if (uvc_urb->pages)
+ dma_sync_sgtable_for_device(stream_to_dmadev(uvc_urb->stream),
+ &uvc_urb->sgt, DMA_FROM_DEVICE);
+
ret = usb_submit_urb(uvc_urb->urb, GFP_KERNEL);
if (ret < 0)
uvc_printk(KERN_ERR, "Failed to resubmit video URB (%d).\n",
@@ -1539,10 +1547,17 @@ static void uvc_video_complete(struct urb *urb)
* Process the URB headers, and optionally queue expensive memcpy tasks
* to be deferred to a work queue.
*/
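+ /* Make the device-written data visible to the CPU before decoding. */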
+ if (uvc_urb->pages)
+ dma_sync_sgtable_for_cpu(stream_to_dmadev(stream),
+ &uvc_urb->sgt, DMA_FROM_DEVICE);
stream->decode(uvc_urb, buf, buf_meta);
/* If no async work is needed, resubmit the URB immediately. */
if (!uvc_urb->async_operations) {
+ if (uvc_urb->pages)
+ dma_sync_sgtable_for_device(stream_to_dmadev(stream),
+ &uvc_urb->sgt,
+ DMA_FROM_DEVICE);
+
ret = usb_submit_urb(uvc_urb->urb, GFP_ATOMIC);
if (ret < 0)
uvc_printk(KERN_ERR,
@@ -1565,18 +1580,61 @@ static void uvc_free_urb_buffers(struct uvc_streaming *stream)
if (!uvc_urb->buffer)
continue;
-#ifndef CONFIG_DMA_NONCOHERENT
- usb_free_coherent(stream->dev->udev, stream->urb_size,
- uvc_urb->buffer, uvc_urb->dma);
-#else
- kfree(uvc_urb->buffer);
-#endif
+ if (uvc_urb->pages) {
+ sg_free_table(&uvc_urb->sgt);
+ vunmap(uvc_urb->buffer);
+ dma_free_noncontiguous(stream_to_dmadev(stream),
+ stream->urb_size,
+ uvc_urb->pages, uvc_urb->dma);
+ } else {
+ usb_free_coherent(stream->dev->udev, stream->urb_size,
+ uvc_urb->buffer, uvc_urb->dma);
+ }
uvc_urb->buffer = NULL;
}

stream->urb_size = 0;
}

+static bool uvc_alloc_urb_buffer(struct uvc_streaming *stream,
+ struct uvc_urb *uvc_urb, gfp_t gfp_flags)
+{
+ struct device *dma_dev = stream_to_dmadev(stream);
+
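+ /* Fall back to coherent buffers if noncontiguous allocation is unsupported. */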
+ if (!dma_can_alloc_noncontiguous(dma_dev)) {
+ uvc_urb->buffer = usb_alloc_coherent(stream->dev->udev,
+ stream->urb_size,
+ gfp_flags | __GFP_NOWARN,
+ &uvc_urb->dma);
+ return uvc_urb->buffer != NULL;
+ }
+
+ uvc_urb->pages = dma_alloc_noncontiguous(dma_dev, stream->urb_size,
+ &uvc_urb->dma, gfp_flags, 0);
+ if (!uvc_urb->pages)
+ return false;
+
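+ /* Map the noncontiguous pages into one contiguous kernel address range. */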
+ uvc_urb->buffer = vmap(uvc_urb->pages,
+ PAGE_ALIGN(stream->urb_size) >> PAGE_SHIFT,
+ VM_MAP, PAGE_KERNEL);
+ if (!uvc_urb->buffer) {
+ dma_free_noncontiguous(dma_dev, stream->urb_size,
+ uvc_urb->pages, uvc_urb->dma);
+ return false;
+ }
+
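+ /* Build a scatter-gather table over the pages for dma_sync_sgtable_*(). */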
+ if (sg_alloc_table_from_pages(&uvc_urb->sgt, uvc_urb->pages,
+ PAGE_ALIGN(stream->urb_size) >> PAGE_SHIFT, 0,
+ stream->urb_size, GFP_KERNEL)) {
+ vunmap(uvc_urb->buffer);
+ dma_free_noncontiguous(dma_dev, stream->urb_size,
+ uvc_urb->pages, uvc_urb->dma);
+ return false;
+ }
+
+ return true;
+}
+
/*
* Allocate transfer buffers. This function can be called with buffers
* already allocated when resuming from suspend, in which case it will
@@ -1607,19 +1665,11 @@ static int uvc_alloc_urb_buffers(struct uvc_streaming *stream,
/* Retry allocations until one succeeds. */
for (; npackets > 1; npackets /= 2) {
+ stream->urb_size = psize * npackets;
for (i = 0; i < UVC_URBS; ++i) {
struct uvc_urb *uvc_urb = &stream->uvc_urb[i];
- stream->urb_size = psize * npackets;
-#ifndef CONFIG_DMA_NONCOHERENT
- uvc_urb->buffer = usb_alloc_coherent(
- stream->dev->udev, stream->urb_size,
- gfp_flags | __GFP_NOWARN, &uvc_urb->dma);
-#else
- uvc_urb->buffer =
- kmalloc(stream->urb_size, gfp_flags | __GFP_NOWARN);
-#endif
- if (!uvc_urb->buffer) {
+ if (!uvc_alloc_urb_buffer(stream, uvc_urb, gfp_flags)) {
uvc_free_urb_buffers(stream);
break;
}
@@ -1728,12 +1778,8 @@ static int uvc_init_video_isoc(struct uvc_streaming *stream,
urb->context = uvc_urb;
urb->pipe = usb_rcvisocpipe(stream->dev->udev,
ep->desc.bEndpointAddress);
-#ifndef CONFIG_DMA_NONCOHERENT
urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
urb->transfer_dma = uvc_urb->dma;
-#else
- urb->transfer_flags = URB_ISO_ASAP;
-#endif
urb->interval = ep->desc.bInterval;
urb->transfer_buffer = uvc_urb->buffer;
urb->complete = uvc_video_complete;
@@ -1793,10 +1839,8 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream,
usb_fill_bulk_urb(urb, stream->dev->udev, pipe, uvc_urb->buffer,
size, uvc_video_complete, uvc_urb);
-#ifndef CONFIG_DMA_NONCOHERENT
urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
urb->transfer_dma = uvc_urb->dma;
-#endif
uvc_urb->urb = urb;
}
@@ -1891,6 +1935,10 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
/* Submit the URBs. */
for_each_uvc_urb(uvc_urb, stream) {
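+ /* Transfer buffer ownership to the device ahead of the first submit. */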
+ if (uvc_urb->pages)
+ dma_sync_sgtable_for_device(stream_to_dmadev(stream),
+ &uvc_urb->sgt,
+ DMA_FROM_DEVICE);
+
ret = usb_submit_urb(uvc_urb->urb, gfp_flags);
if (ret < 0) {
uvc_printk(KERN_ERR, "Failed to submit URB %u (%d).\n",
@@ -532,6 +532,8 @@ struct uvc_urb {
char *buffer;
dma_addr_t dma;
+ struct page **pages;
+ struct sg_table sgt;
unsigned int async_operations;
struct uvc_copy_op copy_operations[UVC_MAX_PACKETS];
On architectures where there is no coherent caching, such as ARM, use the
dma_alloc_noncontiguous API and handle the cache flushing manually using
dma_sync_sgtable(). With this patch we can measure up to a 20x performance
improvement in uvc_video_copy_data_work() on the affected architectures.

Signed-off-by: Ricardo Ribalda <ribalda@chromium.org>
---
 drivers/media/usb/uvc/uvc_video.c | 92 +++++++++++++++++++++++--------
 drivers/media/usb/uvc/uvcvideo.h  |  2 +
 2 files changed, 72 insertions(+), 22 deletions(-)

v4: Thanks to Christoph and Sergei
- Remove the CONFIG_DMA_NONCOHERENT path
- Do not pass __GFP_NOWARN
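
For reference, the buffer lifecycle the patch introduces boils down to the
sketch below. This is a minimal illustration, assuming the
dma_alloc_noncontiguous()/dma_free_noncontiguous() interface used in this
series (which hands back an array of pages); the demo_* names are invented
for the example and error handling is condensed.

	#include <linux/dma-mapping.h>
	#include <linux/mm.h>
	#include <linux/scatterlist.h>
	#include <linux/vmalloc.h>

	struct demo_buf {
		struct page **pages;	/* noncontiguous backing pages */
		void *vaddr;		/* contiguous kernel view (vmap) */
		struct sg_table sgt;	/* used for the cache sync calls */
		dma_addr_t dma;		/* device-side address */
	};

	static int demo_alloc(struct device *dma_dev, struct demo_buf *b,
			      size_t size)
	{
		unsigned int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;

		b->pages = dma_alloc_noncontiguous(dma_dev, size, &b->dma,
						   GFP_KERNEL, 0);
		if (!b->pages)
			return -ENOMEM;

		b->vaddr = vmap(b->pages, npages, VM_MAP, PAGE_KERNEL);
		if (!b->vaddr)
			goto err_free;

		if (sg_alloc_table_from_pages(&b->sgt, b->pages, npages, 0,
					      size, GFP_KERNEL))
			goto err_unmap;

		return 0;

	err_unmap:
		vunmap(b->vaddr);
	err_free:
		dma_free_noncontiguous(dma_dev, size, b->pages, b->dma);
		return -ENOMEM;
	}

	static void demo_urb_complete(struct device *dma_dev,
				      struct demo_buf *b)
	{
		/* Device -> CPU: make the freshly DMA'd data visible. */
		dma_sync_sgtable_for_cpu(dma_dev, &b->sgt, DMA_FROM_DEVICE);

		/* ... decode through b->vaddr here ... */

		/* CPU -> device: hand the buffer back before resubmitting. */
		dma_sync_sgtable_for_device(dma_dev, &b->sgt, DMA_FROM_DEVICE);
	}

	static void demo_free(struct device *dma_dev, struct demo_buf *b,
			      size_t size)
	{
		sg_free_table(&b->sgt);
		vunmap(b->vaddr);
		dma_free_noncontiguous(dma_dev, size, b->pages, b->dma);
	}

Keeping one long-lived sg_table per URB turns each ownership hand-over into
a cheap cache-maintenance walk: on non-coherent systems the memcpy in
uvc_video_copy_data_work() now reads from ordinary cached memory instead of
an uncached coherent mapping, which is where the quoted 20x improvement
comes from.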