@@ -347,7 +347,6 @@ static void wave5_vpu_dec_finish_decode(struct vpu_instance *inst)
struct vb2_v4l2_buffer *dec_buf = NULL;
struct vb2_v4l2_buffer *disp_buf = NULL;
struct vb2_queue *dst_vq = v4l2_m2m_get_dst_vq(m2m_ctx);
- struct queue_status_info q_status;
dev_dbg(inst->dev->dev, "%s: Fetch output info from firmware.", __func__);
@@ -441,20 +440,6 @@ static void wave5_vpu_dec_finish_decode(struct vpu_instance *inst)
}
spin_unlock_irqrestore(&inst->state_spinlock, flags);
}
-
- /*
- * During a resolution change and while draining, the firmware may flush
- * the reorder queue regardless of having a matching decoding operation
- * pending. Only terminate the job if there are no more IRQ coming.
- */
- wave5_vpu_dec_give_command(inst, DEC_GET_QUEUE_STATUS, &q_status);
- if (q_status.report_queue_count == 0 &&
- (q_status.instance_queue_count == 0 || dec_info.sequence_changed)) {
- dev_dbg(inst->dev->dev, "%s: finishing job.\n", __func__);
- pm_runtime_mark_last_busy(inst->dev->dev);
- pm_runtime_put_autosuspend(inst->dev->dev);
- v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
- }
}
static int wave5_vpu_dec_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
@@ -1146,8 +1131,8 @@ static int write_to_ringbuffer(struct vpu_instance *inst, void *buffer, size_t b
static int fill_ringbuffer(struct vpu_instance *inst)
{
struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
- struct v4l2_m2m_buffer *buf, *n;
- int ret;
+ struct vpu_src_buffer *vpu_buf;
+ int ret = 0;
if (m2m_ctx->last_src_buf) {
struct vpu_src_buffer *vpu_buf = wave5_to_vpu_src_buf(m2m_ctx->last_src_buf);
@@ -1158,9 +1143,8 @@ static int fill_ringbuffer(struct vpu_instance *inst)
}
}
- v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
- struct vb2_v4l2_buffer *vbuf = &buf->vb;
- struct vpu_src_buffer *vpu_buf = wave5_to_vpu_src_buf(vbuf);
+ list_for_each_entry(vpu_buf, &inst->avail_src_bufs, list) {
+ struct vb2_v4l2_buffer *vbuf = &vpu_buf->v4l2_m2m_buf.vb;
struct vpu_buf *ring_buffer = &inst->bitstream_vbuf;
size_t src_size = vb2_get_plane_payload(&vbuf->vb2_buf, 0);
void *src_buf = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
@@ -1220,9 +1204,13 @@ static int fill_ringbuffer(struct vpu_instance *inst)
dev_dbg(inst->dev->dev, "last src buffer written to the ring buffer\n");
break;
}
+
+ inst->queuing_num++;
+ list_del_init(&vpu_buf->list);
+ break;
}
- return 0;
+ return ret;
}
static void wave5_vpu_dec_buf_queue_src(struct vb2_buffer *vb)
@@ -1236,6 +1224,11 @@ static void wave5_vpu_dec_buf_queue_src(struct vb2_buffer *vb)
vbuf->sequence = inst->queued_src_buf_num++;
v4l2_m2m_buf_queue(m2m_ctx, vbuf);
+
+ INIT_LIST_HEAD(&vpu_buf->list);
+ mutex_lock(&inst->feed_lock);
+ list_add_tail(&vpu_buf->list, &inst->avail_src_bufs);
+ mutex_unlock(&inst->feed_lock);
}
static void wave5_vpu_dec_buf_queue_dst(struct vb2_buffer *vb)
@@ -1385,6 +1378,13 @@ static int streamoff_output(struct vb2_queue *q)
dma_addr_t new_rd_ptr;
struct dec_output_info dec_info;
unsigned int i;
+ struct vpu_src_buffer *vpu_buf, *tmp;
+
+ inst->retry = false;
+ inst->queuing_num = 0;
+
+ list_for_each_entry_safe(vpu_buf, tmp, &inst->avail_src_bufs, list)
+ list_del_init(&vpu_buf->list);
for (i = 0; i < v4l2_m2m_num_dst_bufs_ready(m2m_ctx); i++) {
ret = wave5_vpu_dec_set_disp_flag(inst, i);
@@ -1580,10 +1580,19 @@ static void wave5_vpu_dec_device_run(void *priv)
dev_dbg(inst->dev->dev, "%s: Fill the ring buffer with new bitstream data", __func__);
pm_runtime_resume_and_get(inst->dev->dev);
- ret = fill_ringbuffer(inst);
- if (ret) {
- dev_warn(inst->dev->dev, "Filling ring buffer failed\n");
- goto finish_job_and_return;
+ if (!inst->retry) {
+ mutex_lock(&inst->feed_lock);
+ ret = fill_ringbuffer(inst);
+ mutex_unlock(&inst->feed_lock);
+ if (ret < 0) {
+ dev_warn(inst->dev->dev, "Filling ring buffer failed\n");
+ goto finish_job_and_return;
+ } else if (!inst->eos &&
+ inst->queuing_num == 0 &&
+ inst->state == VPU_INST_STATE_PIC_RUN) {
+ dev_dbg(inst->dev->dev, "%s: no bitstream for feeding, so skip ", __func__);
+ goto finish_job_and_return;
+ }
}
switch (inst->state) {
@@ -1639,7 +1648,7 @@ static void wave5_vpu_dec_device_run(void *priv)
}
if (q_status.instance_queue_count) {
- dev_dbg(inst->dev->dev, "%s: leave with active job", __func__);
+ v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
return;
}
@@ -1650,14 +1659,21 @@ static void wave5_vpu_dec_device_run(void *priv)
dev_err(inst->dev->dev,
"Frame decoding on m2m context (%p), fail: %d (result: %d)\n",
m2m_ctx, ret, fail_res);
- break;
+ goto finish_job_and_return;
+ }
+
+ if (fail_res == WAVE5_SYSERR_QUEUEING_FAIL) {
+ inst->retry = true;
+ } else {
+ inst->retry = false;
+ if (!inst->eos)
+ inst->queuing_num--;
}
- /* Return so that we leave this job active */
- dev_dbg(inst->dev->dev, "%s: leave with active job", __func__);
- return;
- default:
- WARN(1, "Execution of a job in state %s illegal.\n", state_to_str(inst->state));
break;
+ default:
+ if (!v4l2_m2m_has_stopped(m2m_ctx))
+ WARN(1, "Execution of a job in state %s illegal.\n",
+ state_to_str(inst->state));
}
finish_job_and_return:
@@ -1755,6 +1771,8 @@ static int wave5_vpu_open_dec(struct file *filp)
inst->ops = &wave5_vpu_dec_inst_ops;
spin_lock_init(&inst->state_spinlock);
+ mutex_init(&inst->feed_lock);
+ INIT_LIST_HEAD(&inst->avail_src_bufs);
inst->codec_info = kzalloc(sizeof(*inst->codec_info), GFP_KERNEL);
if (!inst->codec_info)
@@ -255,6 +255,8 @@ int wave5_vpu_dec_close(struct vpu_instance *inst, u32 *fail_res)
if (inst_count == 1)
pm_runtime_dont_use_autosuspend(vpu_dev->dev);
+ mutex_destroy(&inst->feed_lock);
+
unlock_and_return:
mutex_unlock(&vpu_dev->hw_lock);
pm_runtime_put_sync(inst->dev->dev);
@@ -818,6 +818,9 @@ struct vpu_instance {
bool cbcr_interleave;
bool nv21;
bool eos;
+	bool retry; /* retry feeding bitstream when the failure reason is WAVE5_SYSERR_QUEUEING_FAIL */
+	int queuing_num; /* number of source buffers fed to the ring buffer but not yet consumed */
+ struct mutex feed_lock; /* lock for feeding bitstream buffers */
struct vpu_buf bitstream_vbuf;
dma_addr_t last_rd_ptr;
size_t remaining_consumed_bytes;