
MLK-24090 mxc: ipu3: ipu_device: Fix AB-BA deadlock between vdoa_lock and ipu_ch_tbl.lock

In VDOA_MODE, _get_vdoa_ipu_res() locks the ipu_ch_tbl.lock mutex first,
then locks the vdoa_lock mutex and finally unlocks the ipu_ch_tbl.lock
mutex.  The vdoa_lock mutex is not unlocked until put_vdoa_ipu_res().
Since the two mutexes are not unlocked in the reverse of the order in
which they are locked, an AB-BA deadlock may happen, as the lockdep
warning below shows.  It can be detected with the debug option
CONFIG_PROVE_LOCKING enabled.
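
Schematically, the inverted ordering looks like the following minimal
sketch.  The stand-in mutexes and the *_sketch helpers are illustrative
only (not the literal driver code); vdoa_get_handle()/vdoa_put_handle()
are reduced to their effect on vdoa_lock:

/* Minimal sketch of the pre-patch ordering, with stand-in mutexes. */
#include <linux/mutex.h>

static DEFINE_MUTEX(tbl_lock);   /* stands in for ipu_ch_tbl.lock         */
static DEFINE_MUTEX(vdoa_lock);  /* stands in for the VDOA driver's mutex */

static void get_res_sketch(void)
{
	mutex_lock(&tbl_lock);    /* A */
	mutex_lock(&vdoa_lock);   /* B, taken inside vdoa_get_handle()    */
	/* ... reserve IPU channels ... */
	mutex_unlock(&tbl_lock);  /* A released, B deliberately kept held */
}

static void put_res_sketch(void)
{
	mutex_lock(&tbl_lock);    /* A taken again while B is still held,
				   * i.e. B -> A, the reverse of the order
				   * used by get_res_sketch()            */
	mutex_unlock(&vdoa_lock); /* B released via vdoa_put_handle()    */
	/* ... release IPU channels ... */
	mutex_unlock(&tbl_lock);
}

With one task in the get path and another in the put path, each can end
up waiting for the mutex the other holds.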

[   52.398770] ======================================================
[   52.404957] WARNING: possible circular locking dependency detected
[   52.411145] 5.4.24 #1477 Not tainted
[   52.414728] ------------------------------------------------------
[   52.420915] ipu1_task/92 is trying to acquire lock:
[   52.425800] 81f02274 (&ipu_ch_tbl.lock){+.+.}, at: get_res_do_task+0x144/0x77c
[   52.433050]
[   52.433050] but task is already holding lock:
[   52.438888] 8183189c (vdoa_lock){+.+.}, at: vdoa_get_handle+0x64/0x158
[   52.445434]
[   52.445434] which lock already depends on the new lock.
[   52.445434]
[   52.453615]
[   52.453615] the existing dependency chain (in reverse order) is:
[   52.461101]
[   52.461101] -> #1 (vdoa_lock){+.+.}:
[   52.466175]        __mutex_lock+0xb8/0xaa8
[   52.470283]        mutex_lock_nested+0x2c/0x34
[   52.474735]        vdoa_get_handle+0x64/0x158
[   52.479100]        _get_vdoa_ipu_res+0x2b4/0x338
[   52.483726]        get_res_do_task+0x34/0x77c
[   52.488092]        ipu_task_thread+0x148/0xeb0
[   52.492551]        kthread+0x168/0x170
[   52.496310]        ret_from_fork+0x14/0x20
[   52.500414]        0x0
[   52.502779]
[   52.502779] -> #0 (&ipu_ch_tbl.lock){+.+.}:
[   52.508457]        __lock_acquire+0x15d0/0x2588
[   52.512995]        lock_acquire+0xdc/0x280
[   52.517103]        __mutex_lock+0xb8/0xaa8
[   52.521210]        mutex_lock_nested+0x2c/0x34
[   52.525662]        get_res_do_task+0x144/0x77c
[   52.530113]        ipu_task_thread+0x148/0xeb0
[   52.534566]        kthread+0x168/0x170
[   52.538322]        ret_from_fork+0x14/0x20
[   52.542425]        0x0
[   52.544790]
[   52.544790] other info that might help us debug this:
[   52.544790]
[   52.552797]  Possible unsafe locking scenario:
[   52.552797]
[   52.558721]        CPU0                    CPU1
[   52.563256]        ----                    ----
[   52.567790]   lock(vdoa_lock);
[   52.570853]                                lock(&ipu_ch_tbl.lock);
[   52.577040]                                lock(vdoa_lock);
[   52.582619]   lock(&ipu_ch_tbl.lock);
[   52.586289]
[   52.586289]  *** DEADLOCK ***
[   52.586289]
[   52.592215] 1 lock held by ipu1_task/92:
[   52.596142]  #0: 8183189c (vdoa_lock){+.+.}, at: vdoa_get_handle+0x64/0x158
[   52.603123]
[   52.603123] stack backtrace:
[   52.607493] CPU: 0 PID: 92 Comm: ipu1_task Not tainted 5.4.24 #1477
[   52.613765] Hardware name: Freescale i.MX6 Quad/DualLite (Device Tree)
[   52.620298] Backtrace:
[   52.622764] [<8010f094>] (dump_backtrace) from [<8010f3ac>] (show_stack+0x20/0x24)
[   52.630345]  r7:818641a8 r6:00000000 r5:600f0193 r4:818641a8
[   52.636026] [<8010f38c>] (show_stack) from [<80f534d8>] (dump_stack+0xbc/0xe8)
[   52.643261] [<80f5341c>] (dump_stack) from [<8019c8f4>] (print_circular_bug+0x1c4/0x214)
[   52.651361]  r10:a8330000 r9:ffffffff r8:a8330000 r7:a8330550 r6:81d13b1c r5:81d13ac0
[   52.659197]  r4:81d13ac0 r3:46e9f8d1
[   52.662783] [<8019c730>] (print_circular_bug) from [<8019cb48>] (check_noncircular+0x204/0x21c)
[   52.671492]  r9:00000001 r8:81708f50 r7:00000000 r6:a8423a98 r5:a8330530 r4:a8330550
[   52.679245] [<8019c944>] (check_noncircular) from [<8019f274>] (__lock_acquire+0x15d0/0x2588)
[   52.687778]  r8:81708f50 r7:81d13ac0 r6:00000001 r5:81e93d7c r4:a8330530
[   52.694491] [<8019dca4>] (__lock_acquire) from [<801a0b84>] (lock_acquire+0xdc/0x280)
[   52.702334]  r10:00000000 r9:00000000 r8:00000000 r7:81f02274 r6:600f0113 r5:81708724
[   52.710169]  r4:00000000
[   52.712717] [<801a0aa8>] (lock_acquire) from [<80f71938>] (__mutex_lock+0xb8/0xaa8)
[   52.720384]  r10:81e93d7c r9:0000f6d0 r8:81f022e8 r7:00008f50 r6:00000000 r5:ffffe000
[   52.728219]  r4:81f02240
[   52.730765] [<80f71880>] (__mutex_lock) from [<80f72354>] (mutex_lock_nested+0x2c/0x34)
[   52.738778]  r10:00000000 r9:a8423ccc r8:81f022e8 r7:000002d0 r6:8188b6f8 r5:81f02234
[   52.746613]  r4:a4540400
[   52.749162] [<80f72328>] (mutex_lock_nested) from [<80b8c830>] (get_res_do_task+0x144/0x77c)
[   52.757612] [<80b8c6ec>] (get_res_do_task) from [<80b8d6a0>] (ipu_task_thread+0x148/0xeb0)
[   52.765886]  r10:a8139bd0 r9:a8423ccc r8:81f022e8 r7:a4540400 r6:81831604 r5:a454053c
[   52.773721]  r4:600f0013
[   52.776269] [<80b8d558>] (ipu_task_thread) from [<801684c8>] (kthread+0x168/0x170)
[   52.783849]  r10:a8139bd0 r9:80b8d558 r8:81f022e8 r7:a8422000 r6:00000000 r5:a840cd00
[   52.791684]  r4:a83cc080
[   52.794231] [<80168360>] (kthread) from [<801010b4>] (ret_from_fork+0x14/0x20)
[   52.801460] Exception stack(0xa8423fb0 to 0xa8423ff8)
[   52.806521] 3fa0:                                     00000000 00000000 00000000 00000000
[   52.814711] 3fc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
[   52.822898] 3fe0: 00000000 00000000 00000000 00000000 00000013 00000000
[   52.829521]  r10:00000000 r9:00000000 r8:00000000 r7:00000000 r6:00000000 r5:80168360
[   52.837356]  r4:a840cd00

This patch corrects the locking/unlocking sequence for the two mutexes
to fix the deadlock issue.

Reviewed-by: Sandor Yu <sandor.yu@nxp.com>
Signed-off-by: Liu Ying <victor.liu@nxp.com>
5.4-rM2-2.2.x-imx-squashed
Liu Ying 2020-05-21 12:42:54 +08:00
parent ba8559b45b
commit c18ff376db
1 changed file with 15 additions and 5 deletions
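
Before reading the hunks: the change boils down to taking ipu_ch_tbl.lock
in get_res_do_task() itself, so that on the task resource path the table
lock is acquired before vdoa_lock and is never acquired while only
vdoa_lock is held.  A rough sketch of the resulting shape, using the
driver's identifiers but simplified from the diff below (not the literal
code, and not compilable on its own):

/* Rough post-patch shape of get_res_do_task() (simplified sketch). */
static void get_res_do_task_sketch(struct ipu_task_entry *t)
{
	struct ipu_channel_tabel *tbl = &ipu_ch_tbl;
	uint32_t found;

	mutex_lock(&tbl->lock);          /* A: taken once, outermost      */
	found = get_vdoa_ipu_res(t);     /* may take vdoa_lock (B), but
					  * only while A is already held */
	if (!found) {
		/* error path: drop the VDOA handle (releases B) under A */
		if ((t->set.mode & VDOA_MODE) &&
		    tbl->vdoa_used && t->vdoa_handle) {
			tbl->vdoa_used = 0;
			vdoa_put_handle(&t->vdoa_handle);
		}
		mutex_unlock(&tbl->lock);
		return;
	}
	/* ... check the result and update the task state ... */
	mutex_unlock(&tbl->lock);        /* only then split/run the task  */
}

With this shape, the acquisition order reported by lockdep (vdoa_lock
first, then ipu_ch_tbl.lock) no longer occurs on this path.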


@@ -1,6 +1,6 @@
 /*
  * Copyright 2005-2015 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2019 NXP
+ * Copyright 2019,2020 NXP
  */
 /*
@@ -1275,7 +1275,6 @@ static int _get_vdoa_ipu_res(struct ipu_task_entry *t)
 	uint32_t found_vdoa = 0;
 	struct ipu_channel_tabel *tbl = &ipu_ch_tbl;
-	mutex_lock(&tbl->lock);
 	if (t->set.mode & VDOA_MODE) {
 		if (NULL != t->vdoa_handle)
 			found_vdoa = 1;
@@ -1358,7 +1357,7 @@ out:
 	dev_dbg(t->dev,
 		"%s:no:0x%x,found_vdoa:%d, found_ipu:%d\n",
 			__func__, t->task_no, found_vdoa, found_ipu);
-	mutex_unlock(&tbl->lock);
 	if (t->set.task & VDOA_ONLY)
 		return found_vdoa;
 	else if (t->set.mode & VDOA_MODE)
@@ -1373,7 +1372,6 @@ static void put_vdoa_ipu_res(struct ipu_task_entry *tsk, int vdoa_only)
 	int rel_vdoa = 0, rel_ipu = 0;
 	struct ipu_channel_tabel *tbl = &ipu_ch_tbl;
-	mutex_lock(&tbl->lock);
 	if (tsk->set.mode & VDOA_MODE) {
 		if (!tbl->vdoa_used && tsk->vdoa_handle)
 			dev_err(tsk->dev,
@@ -1408,7 +1406,6 @@ out:
 	dev_dbg(tsk->dev,
 		"%s:no:0x%x,rel_vdoa:%d, rel_ipu:%d\n",
 			__func__, tsk->task_no, rel_vdoa, rel_ipu);
-	mutex_unlock(&tbl->lock);
 }
 static int get_vdoa_ipu_res(struct ipu_task_entry *t)
@@ -3047,11 +3044,22 @@ static void get_res_do_task(struct ipu_task_entry *t)
 	uint32_t found;
 	uint32_t split_child;
 	struct mutex *lock;
+	struct ipu_channel_tabel *tbl = &ipu_ch_tbl;
+	mutex_lock(&tbl->lock);
 	found = get_vdoa_ipu_res(t);
 	if (!found) {
 		dev_err(t->dev, "ERR:[0x%p] no-0x%x can not get res\n",
 			t, t->task_no);
+		if ((t->set.mode & VDOA_MODE) &&
+		    tbl->vdoa_used && t->vdoa_handle) {
+			tbl->vdoa_used = 0;
+			vdoa_put_handle(&t->vdoa_handle);
+		}
+		mutex_unlock(&tbl->lock);
 		return;
 	} else {
 		if (t->set.task & VDOA_ONLY)
@@ -3070,6 +3078,8 @@ static void get_res_do_task(struct ipu_task_entry *t)
 			t, t->task_no, state_msg[t->state].msg);
 	}
+	mutex_unlock(&tbl->lock);
 	split_child = need_split(t) && t->parent;
 	if (split_child) {
 		lock = &t->parent->split_lock;