block: don't call ioc_exit_icq() with the queue lock held for blk-mq

For legacy scheduling, we always call ioc_exit_icq() with both the
ioc and queue lock held. This poses a problem for blk-mq with
scheduling, since the queue lock isn't what we use in the scheduler.
And since we don't need the queue lock held for ioc exit there,
don't grab it and leave any extra locking up to the blk-mq scheduler.

Reported-by: Paolo Valente <paolo.valente@linaro.org>
Tested-by: Paolo Valente <paolo.valente@linaro.org>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Jens Axboe 2017-03-02 13:59:08 -07:00
parent a5a79d0001
commit 7b36a7189f
3 changed files with 31 additions and 17 deletions
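
The change is easiest to see in ioc_clear_queue() below: the queue's icq list is spliced off under q->queue_lock, and each icq is then destroyed under only its ioc->lock, with the queue lock dropped first in the blk-mq case. As a rough illustration of that splice-then-exit pattern, here is a minimal userspace sketch using pthread mutexes to stand in for q->queue_lock and ioc->lock; every name in it (clear_queue, clear_icq_list, exit_icq, and so on) is a hypothetical model, not kernel API:

/* Hypothetical userspace model of the locking pattern -- not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Models an icq: each entry is destroyed under its ioc's lock. */
struct icq {
	struct icq *next;
	pthread_mutex_t *ioc_lock;	/* stands in for icq->ioc->lock */
	int id;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ioc_lock = PTHREAD_MUTEX_INITIALIZER;
static struct icq *queue_icq_list;	/* stands in for q->icq_list */

/* Exit one icq; when the queue lock has been dropped (mq case), this
 * is free to take scheduler-private locks without inverting lock order. */
static void exit_icq(struct icq *icq)
{
	printf("exiting icq %d\n", icq->id);
}

/* Counterpart of __ioc_clear_queue(): only the ioc lock is taken. */
static void clear_icq_list(struct icq *list)
{
	while (list) {
		struct icq *icq = list;

		list = icq->next;
		pthread_mutex_lock(icq->ioc_lock);
		exit_icq(icq);
		pthread_mutex_unlock(icq->ioc_lock);
	}
}

/* Counterpart of ioc_clear_queue(): detach the list under the queue
 * lock, then pick the unlock order based on the queue type. */
static void clear_queue(bool is_mq)
{
	struct icq *list;

	pthread_mutex_lock(&queue_lock);
	list = queue_icq_list;		/* models list_splice_init() */
	queue_icq_list = NULL;

	if (is_mq) {
		/* blk-mq: exit icqs without the queue lock held */
		pthread_mutex_unlock(&queue_lock);
		clear_icq_list(list);
	} else {
		/* legacy: icqs are still exited with the queue lock held */
		clear_icq_list(list);
		pthread_mutex_unlock(&queue_lock);
	}
}

int main(void)
{
	struct icq one = { .next = NULL, .ioc_lock = &ioc_lock, .id = 1 };

	queue_icq_list = &one;
	clear_queue(true);	/* exercise the blk-mq path */
	return 0;
}

Built with cc -pthread, the mq path exits the icq only after the queue lock has already been released, which is the property the patch establishes for blk-mq schedulers.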

block/blk-ioc.c

@@ -36,8 +36,8 @@ static void icq_free_icq_rcu(struct rcu_head *head)
 }
 
 /*
- * Exit an icq. Called with both ioc and q locked for sq, only ioc locked for
- * mq.
+ * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
+ * and queue locked for legacy.
  */
 static void ioc_exit_icq(struct io_cq *icq)
 {
@@ -54,7 +54,10 @@ static void ioc_exit_icq(struct io_cq *icq)
 	icq->flags |= ICQ_EXITED;
 }
 
-/* Release an icq. Called with both ioc and q locked. */
+/*
+ * Release an icq. Called with ioc locked for blk-mq, and with both ioc
+ * and queue locked for legacy.
+ */
 static void ioc_destroy_icq(struct io_cq *icq)
 {
 	struct io_context *ioc = icq->ioc;
@@ -62,7 +65,6 @@ static void ioc_destroy_icq(struct io_cq *icq)
 	struct elevator_type *et = q->elevator->type;
 
 	lockdep_assert_held(&ioc->lock);
-	lockdep_assert_held(q->queue_lock);
 
 	radix_tree_delete(&ioc->icq_tree, icq->q->id);
 	hlist_del_init(&icq->ioc_node);
@@ -222,24 +224,40 @@ void exit_io_context(struct task_struct *task)
 	put_io_context_active(ioc);
 }
 
+static void __ioc_clear_queue(struct list_head *icq_list)
+{
+	unsigned long flags;
+
+	while (!list_empty(icq_list)) {
+		struct io_cq *icq = list_entry(icq_list->next,
+					       struct io_cq, q_node);
+		struct io_context *ioc = icq->ioc;
+
+		spin_lock_irqsave(&ioc->lock, flags);
+		ioc_destroy_icq(icq);
+		spin_unlock_irqrestore(&ioc->lock, flags);
+	}
+}
+
 /**
  * ioc_clear_queue - break any ioc association with the specified queue
  * @q: request_queue being cleared
  *
- * Walk @q->icq_list and exit all io_cq's. Must be called with @q locked.
+ * Walk @q->icq_list and exit all io_cq's.
  */
 void ioc_clear_queue(struct request_queue *q)
 {
-	lockdep_assert_held(q->queue_lock);
+	LIST_HEAD(icq_list);
 
-	while (!list_empty(&q->icq_list)) {
-		struct io_cq *icq = list_entry(q->icq_list.next,
-					       struct io_cq, q_node);
-		struct io_context *ioc = icq->ioc;
+	spin_lock_irq(q->queue_lock);
+	list_splice_init(&q->icq_list, &icq_list);
 
-		spin_lock(&ioc->lock);
-		ioc_destroy_icq(icq);
-		spin_unlock(&ioc->lock);
+	if (q->mq_ops) {
+		spin_unlock_irq(q->queue_lock);
+		__ioc_clear_queue(&icq_list);
+	} else {
+		__ioc_clear_queue(&icq_list);
+		spin_unlock_irq(q->queue_lock);
 	}
 }
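
Note the deliberate asymmetry in the if (q->mq_ops) branch above: the icq list is always detached with the queue lock held, but blk-mq drops that lock before exiting the icqs (per the updated comments, only the ioc lock is required there), while the legacy path keeps the queue lock held across __ioc_clear_queue() to preserve the old both-locks contract for legacy elevators.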

block/blk-sysfs.c

@@ -815,9 +815,7 @@ static void blk_release_queue(struct kobject *kobj)
 	blkcg_exit_queue(q);
 
 	if (q->elevator) {
-		spin_lock_irq(q->queue_lock);
 		ioc_clear_queue(q);
-		spin_unlock_irq(q->queue_lock);
 		elevator_exit(q->elevator);
 	}
 

block/elevator.c

@@ -983,9 +983,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 		if (old_registered)
 			elv_unregister_queue(q);
 
-		spin_lock_irq(q->queue_lock);
 		ioc_clear_queue(q);
-		spin_unlock_irq(q->queue_lock);
 	}
 
 	/* allocate, init and register new elevator */
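
With both call sites updated, neither blk_release_queue() nor elevator_switch() takes q->queue_lock around ioc_clear_queue() anymore; the queue lock is acquired and released inside ioc_clear_queue() itself, as shown in the blk-ioc.c hunk.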