block: drop unnecessary blk_get/put_queue() in scsi_cmd_ioctl() and throtl_get_tg()
blk_get/put_queue() in scsi_cmd_ioctl() and throtl_get_tg() are completely bogus. The caller must have a reference to the queue on entry and taking an extra reference doesn't change anything. For scsi_cmd_ioctl(), the only effect is that it ends up checking QUEUE_FLAG_DEAD on entry; however, this is bogus as the queue can die right after blk_get_queue(). A dead queue should be and is handled in the request issue path (it's somewhat broken now but that's a separate problem and doesn't affect this one much). throtl_get_tg() incorrectly assumes that q is rcu freed. Also, it doesn't check the return value of blk_get_queue(). If the queue is already dead, it ends up doing an extra put. Drop them. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Vivek Goyal <vgoyal@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent
75eb6c372d
commit
315fceee81
|
@ -324,12 +324,8 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
|
||||||
/*
|
/*
|
||||||
* Need to allocate a group. Allocation of group also needs allocation
|
* Need to allocate a group. Allocation of group also needs allocation
|
||||||
* of per cpu stats which in-turn takes a mutex() and can block. Hence
|
* of per cpu stats which in-turn takes a mutex() and can block. Hence
|
||||||
* we need to drop rcu lock and queue_lock before we call alloc
|
* we need to drop rcu lock and queue_lock before we call alloc.
|
||||||
*
|
|
||||||
* Take the request queue reference to make sure queue does not
|
|
||||||
* go away once we return from allocation.
|
|
||||||
*/
|
*/
|
||||||
blk_get_queue(q);
|
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
spin_unlock_irq(q->queue_lock);
|
spin_unlock_irq(q->queue_lock);
|
||||||
|
|
||||||
|
@ -339,13 +335,11 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
|
||||||
* dead
|
* dead
|
||||||
*/
|
*/
|
||||||
if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
|
if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
|
||||||
blk_put_queue(q);
|
|
||||||
if (tg)
|
if (tg)
|
||||||
kfree(tg);
|
kfree(tg);
|
||||||
|
|
||||||
return ERR_PTR(-ENODEV);
|
return ERR_PTR(-ENODEV);
|
||||||
}
|
}
|
||||||
blk_put_queue(q);
|
|
||||||
|
|
||||||
/* Group allocated and queue is still alive. take the lock */
|
/* Group allocated and queue is still alive. take the lock */
|
||||||
spin_lock_irq(q->queue_lock);
|
spin_lock_irq(q->queue_lock);
|
||||||
|
|
|
@ -565,7 +565,7 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod
|
||||||
{
|
{
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
if (!q || blk_get_queue(q))
|
if (!q)
|
||||||
return -ENXIO;
|
return -ENXIO;
|
||||||
|
|
||||||
switch (cmd) {
|
switch (cmd) {
|
||||||
|
@ -686,7 +686,6 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod
|
||||||
err = -ENOTTY;
|
err = -ENOTTY;
|
||||||
}
|
}
|
||||||
|
|
||||||
blk_put_queue(q);
|
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(scsi_cmd_ioctl);
|
EXPORT_SYMBOL(scsi_cmd_ioctl);
|
||||||
|
|
Loading…
Reference in New Issue