@@ -20,6 +20,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
+#include <trace/events/v4l2.h>
MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
@@ -281,6 +282,7 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
+ trace_v4l2_m2m_run_job(m2m_dev->curr_ctx);
m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}
@@ -300,10 +302,12 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
struct vb2_v4l2_buffer *dst, *src;
dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
+ trace_v4l2_m2m_schedule(m2m_ctx);
if (!m2m_ctx->out_q_ctx.q.streaming
|| !m2m_ctx->cap_q_ctx.q.streaming) {
dprintk("Streaming needs to be on for both queues\n");
+ trace_v4l2_m2m_schedule_failed(m2m_ctx, V4L2_M2M_NOT_STREAMING);
return;
}
@@ -312,11 +316,13 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
/* If the context is aborted then don't schedule it */
if (m2m_ctx->job_flags & TRANS_ABORT) {
dprintk("Aborted context\n");
+ trace_v4l2_m2m_schedule_failed(m2m_ctx, V4L2_M2M_ABORTED);
goto job_unlock;
}
if (m2m_ctx->job_flags & TRANS_QUEUED) {
dprintk("On job queue already\n");
+ trace_v4l2_m2m_schedule_failed(m2m_ctx, V4L2_M2M_ALREADY_QUEUED);
goto job_unlock;
}
@@ -324,10 +330,12 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
dst = v4l2_m2m_next_dst_buf(m2m_ctx);
if (!src && !m2m_ctx->out_q_ctx.buffered) {
dprintk("No input buffers available\n");
+ trace_v4l2_m2m_schedule_failed(m2m_ctx, V4L2_M2M_NO_SRC);
goto job_unlock;
}
if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
dprintk("No output buffers available\n");
+ trace_v4l2_m2m_schedule_failed(m2m_ctx, V4L2_M2M_NO_DST);
goto job_unlock;
}
@@ -343,6 +351,7 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
dprintk("No output buffers available after returning held buffer\n");
+ trace_v4l2_m2m_schedule_failed(m2m_ctx, V4L2_M2M_NO_DST_POST_HELD);
goto job_unlock;
}
}
@@ -354,17 +363,20 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
if (m2m_ctx->has_stopped) {
dprintk("Device has stopped\n");
+ trace_v4l2_m2m_schedule_failed(m2m_ctx, V4L2_M2M_DEV_STOPPED);
goto job_unlock;
}
if (m2m_dev->m2m_ops->job_ready
&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
dprintk("Driver not ready\n");
+ trace_v4l2_m2m_schedule_failed(m2m_ctx, V4L2_M2M_DRV_NOT_READY);
goto job_unlock;
}
list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
m2m_ctx->job_flags |= TRANS_QUEUED;
+ trace_v4l2_m2m_queue_job(m2m_ctx, src ? &src->vb2_buf : NULL, dst ? &dst->vb2_buf : NULL);
job_unlock:
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
@@ -426,6 +438,7 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
if (m2m_dev->m2m_ops->job_abort)
m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
+ trace_v4l2_m2m_cancel_job_wait(m2m_ctx);
wait_event(m2m_ctx->finished,
!(m2m_ctx->job_flags & TRANS_RUNNING));
} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
@@ -438,6 +451,7 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
/* Do nothing, was not on queue/running */
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
}
+ trace_v4l2_m2m_cancel_job(m2m_ctx);
}
/*
@@ -477,6 +491,7 @@ static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
wake_up(&m2m_dev->curr_ctx->finished);
m2m_dev->curr_ctx = NULL;
+ trace_v4l2_m2m_finish_job(m2m_ctx);
return true;
}
@@ -11,3 +11,10 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_buf_queue);
EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_dqbuf);
EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_qbuf);
EXPORT_TRACEPOINT_SYMBOL_GPL(v4l2_ioctl_s_fmt);
+EXPORT_TRACEPOINT_SYMBOL_GPL(v4l2_m2m_schedule);
+EXPORT_TRACEPOINT_SYMBOL_GPL(v4l2_m2m_schedule_failed);
+EXPORT_TRACEPOINT_SYMBOL_GPL(v4l2_m2m_queue_job);
+EXPORT_TRACEPOINT_SYMBOL_GPL(v4l2_m2m_run_job);
+EXPORT_TRACEPOINT_SYMBOL_GPL(v4l2_m2m_cancel_job_wait);
+EXPORT_TRACEPOINT_SYMBOL_GPL(v4l2_m2m_cancel_job);
+EXPORT_TRACEPOINT_SYMBOL_GPL(v4l2_m2m_finish_job);
@@ -15,6 +15,17 @@
#include <media/videobuf2-v4l2.h>
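+/**
+ * enum v4l2_m2m_state - reason a mem2mem job could not be scheduled
+ * @V4L2_M2M_NOT_STREAMING: streaming is not enabled on both queues
+ * @V4L2_M2M_ABORTED: the context has been aborted
+ * @V4L2_M2M_ALREADY_QUEUED: the context is already on the job queue
+ * @V4L2_M2M_NO_SRC: no source buffer is available
+ * @V4L2_M2M_NO_DST: no destination buffer is available
+ * @V4L2_M2M_NO_DST_POST_HELD: no destination buffer is available after
+ *	returning a held buffer
+ * @V4L2_M2M_DEV_STOPPED: the device has stopped
+ * @V4L2_M2M_DRV_NOT_READY: the driver's job_ready() callback reported it
+ *	is not ready
+ *
+ * These values are reported by the v4l2_m2m_schedule_failed tracepoint.
+ */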
+enum v4l2_m2m_state {
+ V4L2_M2M_NOT_STREAMING,
+ V4L2_M2M_ABORTED,
+ V4L2_M2M_ALREADY_QUEUED,
+ V4L2_M2M_NO_SRC,
+ V4L2_M2M_NO_DST,
+ V4L2_M2M_NO_DST_POST_HELD,
+ V4L2_M2M_DEV_STOPPED,
+ V4L2_M2M_DRV_NOT_READY
+};
+
/**
* struct v4l2_m2m_ops - mem-to-mem device driver callbacks
* @device_run: required. Begin the actual job (transaction) inside this
@@ -10,6 +10,7 @@
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
/* Enums require being exported to userspace, for user tool parsing */
#undef EM
@@ -55,6 +56,21 @@ SHOW_TYPE
SHOW_FIELD
+#define show_reason(state) \
+ __print_symbolic(state, SHOW_REASON)
+
+#define SHOW_REASON \
+ EM( V4L2_M2M_NOT_STREAMING, "NOT_STREAMING" ) \
+ EM( V4L2_M2M_ABORTED, "ABORTED" ) \
+ EM( V4L2_M2M_ALREADY_QUEUED, "ALREADY_QUEUED" ) \
+ EM( V4L2_M2M_NO_SRC, "NO_SRC" ) \
+ EM( V4L2_M2M_NO_DST, "NO_DST" ) \
+ EM( V4L2_M2M_NO_DST_POST_HELD, "NO_DST_POST_HELD" ) \
+ EM( V4L2_M2M_DEV_STOPPED, "DEV_STOPPED" ) \
+ EMe(V4L2_M2M_DRV_NOT_READY, "DRV_NOT_READY" )
+
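+/* Export the scheduling-failure reasons to userspace, as done for SHOW_TYPE and SHOW_FIELD above */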
+SHOW_REASON
+
/*
* Now redefine the EM() and EMe() macros to map the enums to the strings
* that will be printed in the output.
@@ -266,6 +282,118 @@ DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_qbuf,
TP_ARGS(q, vb)
);
+/*
+ * v4l2_m2m job tracing
+ * expected order of events:
+ * v4l2_m2m_schedule <= start of a job trace
+ * [v4l2_m2m_schedule_failed*]
+ * v4l2_m2m_queue_job <= job queued on list of ready jobs
+ * v4l2_m2m_run_job <= driver told to run the job
+ * [v4l2_m2m_cancel_job_wait] <= job cancelled, but waiting for completion as it is already running
+ * [v4l2_m2m_cancel_job*] <= job cancelled
+ * v4l2_m2m_finish_job <= job finished, end of trace
+ *
+ * events in [] are optional; they may appear, but would not usually be expected
+ * events marked with * are terminal and end a trace early
+ *
+ * A typical job timeline is a three-segment period:
+ * [ scheduled | queued | running ]
+ * ^           ^        ^         ^
+ * |-----------|--------|---------|-- v4l2_m2m_schedule
+ *             |--------|---------|-- v4l2_m2m_queue_job
+ *                      |---------|-- v4l2_m2m_run_job
+ *                                |-- v4l2_m2m_finish_job
+ */
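+/*
+ * Example (field values illustrative only): a job that runs to completion
+ * without being cancelled produces, per the TP_printk formats below, a
+ * trace of the form:
+ *   v4l2_m2m_schedule:   minor = 4, fh = <ptr>
+ *   v4l2_m2m_queue_job:  minor = 4, fh = <ptr>, src = 0, dst = 0
+ *   v4l2_m2m_run_job:    minor = 4, fh = <ptr>
+ *   v4l2_m2m_finish_job: minor = 4, fh = <ptr>
+ * The events are grouped under the "v4l2" trace system, e.g.:
+ *   echo 1 > /sys/kernel/tracing/events/v4l2/v4l2_m2m_queue_job/enable
+ */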
+DECLARE_EVENT_CLASS(v4l2_m2m_event_class,
+ TP_PROTO(struct v4l2_m2m_ctx *ctx),
+ TP_ARGS(ctx),
+
+ TP_STRUCT__entry(
+ __field(int, minor)
+ __field(struct v4l2_fh *, fh)
+ ),
+
+ TP_fast_assign(
+ struct v4l2_fh *owner = ctx->cap_q_ctx.q.owner;
+
+ __entry->minor = owner ? owner->vdev->minor : -1;
+ __entry->fh = owner;
+ ),
+
+ TP_printk("minor = %d, fh = %p",
+ __entry->minor, __entry->fh)
+)
+
+DEFINE_EVENT(v4l2_m2m_event_class, v4l2_m2m_schedule,
+ TP_PROTO(struct v4l2_m2m_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+TRACE_EVENT(v4l2_m2m_schedule_failed,
+ TP_PROTO(struct v4l2_m2m_ctx *ctx, enum v4l2_m2m_state reason),
+ TP_ARGS(ctx, reason),
+
+ TP_STRUCT__entry(
+ __field(int, minor)
+ __field(struct v4l2_fh *, fh)
+ __field(enum v4l2_m2m_state, reason)
+ ),
+
+ TP_fast_assign(
+ struct v4l2_fh *owner = ctx->cap_q_ctx.q.owner;
+
+ __entry->minor = owner ? owner->vdev->minor : -1;
+ __entry->fh = owner;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("minor = %d, fh = %p, reason = %s",
+ __entry->minor, __entry->fh, show_reason(__entry->reason))
+);
+
+TRACE_EVENT(v4l2_m2m_queue_job,
+ TP_PROTO(struct v4l2_m2m_ctx *ctx, struct vb2_buffer *src, struct vb2_buffer *dst),
+ TP_ARGS(ctx, src, dst),
+
+ TP_STRUCT__entry(
+ __field(int, minor)
+ __field(struct v4l2_fh *, fh)
+ __field(s32, src)
+ __field(s32, dst)
+ ),
+
+ TP_fast_assign(
+ struct v4l2_fh *owner = ctx->cap_q_ctx.q.owner;
+
+ __entry->minor = owner ? owner->vdev->minor : -1;
+ __entry->fh = owner;
+ __entry->src = src ? (s32)src->index : -1;
+ __entry->dst = dst ? (s32)dst->index : -1;
+ ),
+ TP_printk("minor = %d, fh = %p, src = %d, dst = %d",
+ __entry->minor, __entry->fh, __entry->src, __entry->dst)
+);
+
+DEFINE_EVENT(v4l2_m2m_event_class, v4l2_m2m_run_job,
+ TP_PROTO(struct v4l2_m2m_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(v4l2_m2m_event_class, v4l2_m2m_cancel_job_wait,
+ TP_PROTO(struct v4l2_m2m_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(v4l2_m2m_event_class, v4l2_m2m_cancel_job,
+ TP_PROTO(struct v4l2_m2m_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(v4l2_m2m_event_class, v4l2_m2m_finish_job,
+ TP_PROTO(struct v4l2_m2m_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
#ifdef CREATE_TRACE_POINTS
#define __trace_array_name(a, arr, num) (((unsigned)(a)) < num ? arr[a] : "unknown")
static inline void __trace_sprint_v4l2_format(char *str, size_t size, struct v4l2_format *p)