@@ -397,7 +397,7 @@ static void uvcg_video_pump(struct work_struct *work)
bool buf_done;
int ret;
- while (video->ep->enabled && uvc->state == UVC_STATE_STREAMING) {
+ if (video->ep->enabled && uvc->state == UVC_STATE_STREAMING) {
/*
* Retrieve the first available USB request, protected by the
* request lock.
@@ -409,6 +409,11 @@ static void uvcg_video_pump(struct work_struct *work)
}
req = list_first_entry(&video->req_free, struct usb_request,
list);
+ if (!req) {
+ spin_unlock_irqrestore(&video->req_lock, flags);
+ return;
+ }
+
list_del(&req->list);
spin_unlock_irqrestore(&video->req_lock, flags);
@@ -437,7 +442,7 @@ static void uvcg_video_pump(struct work_struct *work)
* further.
*/
spin_unlock_irqrestore(&queue->irqlock, flags);
- break;
+ goto out;
}
/*
@@ -470,20 +475,23 @@ static void uvcg_video_pump(struct work_struct *work)
/* Queue the USB request */
ret = uvcg_video_ep_queue(video, req);
spin_unlock_irqrestore(&queue->irqlock, flags);
-
if (ret < 0) {
uvcg_queue_cancel(queue, 0);
- break;
+ goto out;
}
/* Endpoint now owns the request */
req = NULL;
video->req_int_count++;
+ } else {
+ return;
}
- if (!req)
- return;
+ if (uvc->state == UVC_STATE_STREAMING)
+ queue_work(video->async_wq, &video->pump);
+ return;
+out:
spin_lock_irqsave(&video->req_lock, flags);
list_add_tail(&req->list, &video->req_free);
spin_unlock_irqrestore(&video->req_lock, flags);
The uvc_video_enable function calls cancel_work_sync, which blocks for as long as new requests keep being queued by the while loop. To ensure an earlier stop of the pumping loop in this particular case, rework the worker to requeue itself for every request. Since the worker already runs prioritized, the scheduling overhead does not have a real impact on performance. Signed-off-by: Michael Grzeschik <m.grzeschik@pengutronix.de> --- v1 == v2 drivers/usb/gadget/function/uvc_video.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-)