
LF-840-5:[8QM_MEK/8QXP_MEK]mxc:vpu_malone:abort or stop cmd may time out after resume from suspend when running multiple h264 instances

1. Make sure the MU interrupt message in the FIFO has been handled.
2. Make sure the event in the FIFO has been handled.

Signed-off-by: Ming Qian <ming.qian@nxp.com>
Acked-by: Shijie Qin <shijie.qin@nxp.com>
5.4-rM2-2.2.x-imx-squashed
Ming Qian 2020-02-19 15:28:48 +08:00
parent b61e7b6411
commit 69dcbb5bd9
4 changed files with 50 additions and 27 deletions
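
The fix pairs every immediate work item with a short delayed re-check. Whenever the MU mailbox delivers a message the driver queues msg_work and also a 10 ms delayed_msg_work; whenever an event is dispatched to an instance it queues instance_work and a 10 ms delayed_instance_work. Each delayed handler just checks the corresponding kfifo and, if anything is still pending (for example because the immediate work was cancelled around suspend), re-queues the normal handler. With that safety net in place, the old vpu_notify_msg_event() kick from qbuf/dqbuf/poll becomes unnecessary and is removed. A minimal sketch of the pattern, using illustrative names rather than the driver's own, looks like this:

/* Sketch of the delayed-work fallback introduced by this commit.
 * msg_pump, pump_notify and pump_recheck are illustrative names,
 * not symbols from the driver.
 */
#include <linux/jiffies.h>
#include <linux/kfifo.h>
#include <linux/workqueue.h>

struct msg_pump {
	struct workqueue_struct *wq;
	struct work_struct work;	/* drains the fifo immediately */
	struct delayed_work recheck;	/* safety net ~10 ms later */
	DECLARE_KFIFO(fifo, u32, 64);
};

static void pump_recheck(struct work_struct *w)
{
	struct msg_pump *p = container_of(to_delayed_work(w),
					  struct msg_pump, recheck);

	/* If the immediate work was lost (e.g. cancelled around suspend)
	 * while messages are still queued, kick the handler again.
	 */
	if (kfifo_len(&p->fifo))
		queue_work(p->wq, &p->work);
}

static void pump_notify(struct msg_pump *p)
{
	/* Normal path: handle the message right away ... */
	queue_work(p->wq, &p->work);
	/* ... and re-check shortly after, in case that work is lost. */
	queue_delayed_work(p->wq, &p->recheck, msecs_to_jiffies(10));
}

On suspend, vpu_dec_cancel_work() cancels the delayed works together with the ordinary ones, so no stale re-check can fire while the VPU is down; the 10 ms fallback only runs while the device is active.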

View File

@@ -93,7 +93,6 @@ static int send_stop_cmd(struct vpu_ctx *ctx);
static int vpu_dec_cmd_reset(struct vpu_ctx *ctx);
static void vpu_dec_event_decode_error(struct vpu_ctx *ctx);
static void vpu_calculate_performance(struct vpu_ctx *ctx, u_int32 uEvent, const char *str);
static void vpu_notify_msg_event(struct vpu_dev *dev);
static void vpu_dec_cancel_work(struct vpu_dev *vpudev);
#define CHECK_BIT(var, pos) (((var) >> (pos)) & 1)
@@ -1494,7 +1493,6 @@ static int v4l2_ioctl_qbuf(struct file *file,
return -EINVAL;
}
vpu_notify_msg_event(ctx->dev);
ret = vpu_dec_queue_qbuf(q_data, buf);
if (ret) {
vpu_err("error: %s() return ret=%d\n", __func__, ret);
@@ -1588,7 +1586,6 @@ static int v4l2_ioctl_dqbuf(struct file *file,
else
return -EINVAL;
vpu_notify_msg_event(ctx->dev);
ret = vpu_dec_queue_dqbuf(q_data, buf, file->f_flags & O_NONBLOCK);
if (ret) {
@@ -3070,6 +3067,7 @@ static int send_stop_cmd(struct vpu_ctx *ctx)
reinit_completion(&ctx->stop_cmp);
v4l2_vpu_send_cmd(ctx, ctx->str_index, VID_API_CMD_STOP, 0, NULL);
if (!wait_for_completion_timeout(&ctx->stop_cmp, msecs_to_jiffies(1000))) {
vpu_dec_clear_pending_cmd(ctx);
ctx->hang_status = true;
vpu_err("the path id: %d firmware timeout after send %s\n",
@@ -4733,26 +4731,6 @@ static bool receive_msg_queue(struct vpu_ctx *ctx, struct event_msg *msg)
return false;
}
static void vpu_notify_msg_event(struct vpu_dev *dev)
{
int i;
mutex_lock(&dev->dev_mutex);
if (dev->suspend)
goto exit;
for (i = 0; i < VPU_MAX_NUM_STREAMS; i++) {
struct vpu_ctx *ctx = dev->ctx[i];
if (!ctx || ctx->ctx_released || !kfifo_len(&ctx->msg_fifo))
continue;
queue_work(ctx->instance_wq, ctx->instance_work);
}
exit:
mutex_unlock(&dev->dev_mutex);
}
extern u_int32 rpc_MediaIPFW_Video_message_check(struct shared_addr *This);
static void vpu_receive_msg_event(struct vpu_dev *dev)
{
struct event_msg msg;
@@ -4766,6 +4744,7 @@ static void vpu_receive_msg_event(struct vpu_dev *dev)
memset(&msg, 0, sizeof(struct event_msg));
while (rpc_MediaIPFW_Video_message_check(This) == API_MSG_AVAILABLE) {
rpc_receive_msg_buf(This, &msg);
mutex_lock(&dev->dev_mutex);
ctx = dev->ctx[msg.idx];
if (ctx)
@@ -4773,7 +4752,9 @@ static void vpu_receive_msg_event(struct vpu_dev *dev)
if (ctx != NULL && !ctx->ctx_released) {
send_msg_queue(ctx, &msg);
queue_work(ctx->instance_wq, ctx->instance_work);
queue_delayed_work(ctx->instance_wq,
ctx->delayed_instance_work,
msecs_to_jiffies(10));
} else {
vpu_err("msg [%d] %d is missed!%s\n",
msg.idx, msg.msgid,
@@ -4783,8 +4764,6 @@ static void vpu_receive_msg_event(struct vpu_dev *dev)
}
if (rpc_MediaIPFW_Video_message_check(This) == API_MSG_BUFFER_ERROR)
vpu_err("error: message size is too big to handle\n");
vpu_notify_msg_event(dev);
}
static void vpu_handle_msg_data(struct vpu_dev *dev, u32 data)
@@ -4824,6 +4803,19 @@ static void vpu_msg_run_work(struct work_struct *work)
vpu_handle_msg_data(dev, data);
}
static void vpu_msg_run_delayed_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct vpu_dev *dev;
dwork = to_delayed_work(work);
dev = container_of(dwork, struct vpu_dev, delayed_msg_work);
if (!kfifo_len(&dev->mu_msg_fifo))
return;
queue_work(dev->workqueue, &dev->msg_work);
}
static void vpu_msg_instance_work(struct work_struct *work)
{
struct vpu_ctx_work *ctx_work;
@@ -4843,6 +4835,22 @@ static void vpu_msg_instance_work(struct work_struct *work)
}
}
static void vpu_msg_delayed_instance_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct vpu_ctx_work *ctx_work;
struct vpu_ctx *ctx;
dwork = to_delayed_work(work);
ctx_work = container_of(dwork, struct vpu_ctx_work,
delayed_instance_work);
ctx = ctx_work->dev->ctx[ctx_work->str_index];
if (!ctx || ctx->ctx_released || !kfifo_len(&ctx->msg_fifo))
return;
queue_work(ctx->instance_wq, ctx->instance_work);
}
static bool vpu_dec_alloc_buffer_item(struct vpu_ctx *ctx,
u32 index, u32 count, u32 size,
struct dma_buffer *buffers,
@@ -6022,6 +6030,7 @@ static int v4l2_open(struct file *filp)
goto err_alloc_wq;
}
ctx->instance_work = &dev->ctx_work[idx].instance_work;
ctx->delayed_instance_work = &dev->ctx_work[idx].delayed_instance_work;
ctx->alloc_work = &dev->ctx_work[idx].alloc_work;
mutex_init(&ctx->instance_mutex);
@@ -6197,6 +6206,7 @@ static int v4l2_release(struct file *filp)
ctx->ctx_released = true;
mutex_unlock(&ctx->dev->dev_mutex);
cancel_delayed_work_sync(ctx->delayed_instance_work);
cancel_work_sync(ctx->instance_work);
cancel_work_sync(ctx->alloc_work);
kfifo_free(&ctx->msg_fifo);
@@ -6231,7 +6241,6 @@ static unsigned int v4l2_poll(struct file *filp, poll_table *wait)
vpu_dbg(LVL_BIT_FUNC, "%s()\n", __func__);
vpu_notify_msg_event(ctx->dev);
poll_wait(filp, &ctx->fh.wait, wait);
if (v4l2_event_pending(&ctx->fh)) {
@@ -6500,6 +6509,8 @@ static void vpu_dec_init_ctx_work(struct vpu_dev *dev)
ctx_work->str_index = i;
ctx_work->dev = dev;
INIT_WORK(&ctx_work->instance_work, vpu_msg_instance_work);
INIT_DELAYED_WORK(&ctx_work->delayed_instance_work,
vpu_msg_delayed_instance_work);
INIT_WORK(&ctx_work->alloc_work, vpu_alloc_work);
}
}
@@ -6575,6 +6586,7 @@ static int vpu_probe(struct platform_device *pdev)
}
INIT_WORK(&dev->msg_work, vpu_msg_run_work);
INIT_DELAYED_WORK(&dev->delayed_msg_work, vpu_msg_run_delayed_work);
vpu_enable_hw(dev);
pm_runtime_enable(&pdev->dev);
@@ -6715,10 +6727,12 @@ static void vpu_dec_cancel_work(struct vpu_dev *vpudev)
vpudev->suspend = true;
mutex_unlock(&vpudev->dev_mutex);
cancel_delayed_work_sync(&vpudev->delayed_msg_work);
cancel_work_sync(&vpudev->msg_work);
for (i = 0; i < VPU_MAX_NUM_STREAMS; i++) {
struct vpu_ctx_work *ctx_work = &vpudev->ctx_work[i];
cancel_delayed_work_sync(&ctx_work->delayed_instance_work);
cancel_work_sync(&ctx_work->instance_work);
cancel_work_sync(&ctx_work->alloc_work);
}

View File

@@ -268,6 +268,7 @@ struct vpu_sc_chan {
struct vpu_ctx_work {
struct work_struct instance_work;
struct delayed_work delayed_instance_work;
struct work_struct alloc_work;
int str_index;
struct vpu_dev *dev;
@@ -296,6 +297,7 @@ struct vpu_dev {
struct completion snap_done_cmp;
struct workqueue_struct *workqueue;
struct work_struct msg_work;
struct delayed_work delayed_msg_work;
unsigned long instance_mask;
unsigned long hang_mask; //this is used to deal with hang issue to reset firmware
struct clk *vpu_clk;
@@ -409,6 +411,7 @@ struct vpu_ctx {
struct kfifo msg_fifo;
struct mutex instance_mutex;
struct work_struct *instance_work;
struct delayed_work *delayed_instance_work;
struct work_struct *alloc_work;
struct workqueue_struct *instance_wq;
struct completion completion;

View File

@@ -32,6 +32,8 @@ static void vpu_mu_inq_msg(struct vpu_dev *dev, void *msg)
}
queue_work(dev->workqueue, &dev->msg_work);
queue_delayed_work(dev->workqueue,
&dev->delayed_msg_work, msecs_to_jiffies(10));
}
static void vpu_mbox_free(struct vpu_dev *dev)
@@ -174,6 +176,9 @@ u_int32 vpu_mu_receive_msg(struct vpu_dev *dev, void *msg)
ret);
} else {
ret = kfifo_len(&dev->mu_msg_fifo);
if (ret)
vpu_err("error: broken msg\n");
ret = 0;
}
return ret;

View File

@@ -114,5 +114,6 @@ void rpc_send_cmd_buf(struct shared_addr *This,
u_int32 cmdnum,
u_int32 *local_cmddata);
void rpc_receive_msg_buf(struct shared_addr *This, struct event_msg *msg);
u_int32 rpc_MediaIPFW_Video_message_check(struct shared_addr *This);
#endif