
blk-mq: insert passthrough request into hctx->dispatch directly

[ Upstream commit 01e99aeca3 ]

For some reason, a device may get into a state where it cannot handle
FS requests: BLK_STS_RESOURCE is always returned, and the FS request is
requeued to hctx->dispatch. However, a passthrough request may be
required at that time to get the device out of that state. If the
passthrough request is added to the scheduler queue, blk-mq never gets
a chance to dispatch it, because requests in hctx->dispatch are
prioritized. The FS IO request may then never complete, and an IO hang
results.
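As a concrete, purely hypothetical illustration of that failure mode, a
driver's ->queue_rq() in such a state might look like the sketch below.
The mydev_* names and the in_error flag are invented for this sketch
and are not part of the patch; only the blk-mq types and
blk_rq_is_passthrough() are real v5.4 APIs.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/* Hypothetical driver state; invented for illustration. */
struct mydev {
	bool in_error;
};

static blk_status_t mydev_issue(struct mydev *dev, struct request *rq);

static blk_status_t mydev_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct mydev *dev = hctx->queue->queuedata;

	/*
	 * While the device is in the error state, every FS request fails
	 * with BLK_STS_RESOURCE and is requeued to hctx->dispatch, which
	 * blk-mq retries with priority.  If the passthrough command that
	 * would clear the state sits in the scheduler queue, it is never
	 * dispatched: the IO hang described above.
	 */
	if (dev->in_error && !blk_rq_is_passthrough(rq))
		return BLK_STS_RESOURCE;

	return mydev_issue(dev, rq);	/* normal issue path */
}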

So the passthrough request has to be added to hctx->dispatch directly
to fix the IO hang.

Fix this issue by inserting passthrough requests into hctx->dispatch
directly, while adding FS requests to the tail of hctx->dispatch in
blk_mq_dispatch_rq_list(). FS requests are already added to the tail of
hctx->dispatch by default; see blk_mq_request_bypass_insert().

This also makes the behavior consistent with the legacy IO request
path, where passthrough requests were always added to q->queue_head.
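For context, here is a hedged sketch of how such a passthrough request
is typically produced on a v5.4-era kernel; mydev_send_recovery_cmd and
the payload setup are invented, while blk_get_request(),
blk_execute_rq() and blk_put_request() are the real v5.4 calls. A
request allocated with REQ_OP_DRV_OUT (or REQ_OP_DRV_IN/REQ_OP_SCSI_*)
makes blk_rq_is_passthrough() return true, so after this patch
blk_mq_sched_insert_request() routes it through
blk_mq_request_bypass_insert() into hctx->dispatch instead of into the
elevator.

#include <linux/blkdev.h>

/* Sketch only; error handling trimmed, mydev_* names invented. */
static int mydev_send_recovery_cmd(struct request_queue *q)
{
	struct request *rq;

	rq = blk_get_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... fill in the driver-private command payload here ... */

	/* Inserted at the head of the queue, executed synchronously. */
	blk_execute_rq(q, NULL, rq, 1);

	blk_put_request(rq);
	return 0;
}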

Cc: Dongli Zhang <dongli.zhang@oracle.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Ewan D. Milne <emilne@redhat.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
5.4-rM2-2.2.x-imx-squashed
Ming Lei 2020-02-25 09:04:32 +08:00 committed by Greg Kroah-Hartman
parent 74886a6d9d
commit 74c77d6a4e
4 changed files with 29 additions and 16 deletions

block/blk-flush.c

@@ -399,7 +399,7 @@ void blk_insert_flush(struct request *rq)
 	 */
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
-		blk_mq_request_bypass_insert(rq, false);
+		blk_mq_request_bypass_insert(rq, false, false);
 		return;
 	}

block/blk-mq-sched.c

@@ -361,13 +361,19 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
 				       bool has_sched,
 				       struct request *rq)
 {
-	/* dispatch flush rq directly */
-	if (rq->rq_flags & RQF_FLUSH_SEQ) {
-		spin_lock(&hctx->lock);
-		list_add(&rq->queuelist, &hctx->dispatch);
-		spin_unlock(&hctx->lock);
+	/*
+	 * dispatch flush and passthrough rq directly
+	 *
+	 * passthrough request has to be added to hctx->dispatch directly.
+	 * For some reason, device may be in one situation which can't
+	 * handle FS request, so STS_RESOURCE is always returned and the
+	 * FS request will be added to hctx->dispatch. However passthrough
+	 * request may be required at that time for fixing the problem. If
+	 * passthrough request is added to scheduler queue, there isn't any
+	 * chance to dispatch it given we prioritize requests in hctx->dispatch.
+	 */
+	if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
 		return true;
-	}
 
 	if (has_sched)
 		rq->rq_flags |= RQF_SORTED;

@@ -391,8 +397,10 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 
 	WARN_ON(e && (rq->tag != -1));
 
-	if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
+	if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
+		blk_mq_request_bypass_insert(rq, at_head, false);
 		goto run;
+	}
 
 	if (e && e->type->ops.insert_requests) {
 		LIST_HEAD(list);

block/blk-mq.c

@@ -761,7 +761,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
 		 * merge.
 		 */
 		if (rq->rq_flags & RQF_DONTPREP)
-			blk_mq_request_bypass_insert(rq, false);
+			blk_mq_request_bypass_insert(rq, false, false);
 		else
 			blk_mq_sched_insert_request(rq, true, false, false);
 	}
@@ -1313,7 +1313,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 			q->mq_ops->commit_rqs(hctx);
 
 		spin_lock(&hctx->lock);
-		list_splice_init(list, &hctx->dispatch);
+		list_splice_tail_init(list, &hctx->dispatch);
 		spin_unlock(&hctx->lock);
 
 		/*
@@ -1668,12 +1668,16 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  * Should only be used carefully, when the caller knows we want to
  * bypass a potential IO scheduler on the target device.
  */
-void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
+void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
+				  bool run_queue)
 {
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	spin_lock(&hctx->lock);
-	list_add_tail(&rq->queuelist, &hctx->dispatch);
+	if (at_head)
+		list_add(&rq->queuelist, &hctx->dispatch);
+	else
+		list_add_tail(&rq->queuelist, &hctx->dispatch);
 	spin_unlock(&hctx->lock);
 
 	if (run_queue)
@@ -1863,7 +1867,7 @@ insert:
 	if (bypass_insert)
 		return BLK_STS_RESOURCE;
 
-	blk_mq_request_bypass_insert(rq, run_queue);
+	blk_mq_request_bypass_insert(rq, false, run_queue);
 	return BLK_STS_OK;
 }
@@ -1879,7 +1883,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
 	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
-		blk_mq_request_bypass_insert(rq, true);
+		blk_mq_request_bypass_insert(rq, false, true);
 	else if (ret != BLK_STS_OK)
 		blk_mq_end_request(rq, ret);
@@ -1913,7 +1917,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		if (ret != BLK_STS_OK) {
 			if (ret == BLK_STS_RESOURCE ||
 					ret == BLK_STS_DEV_RESOURCE) {
-				blk_mq_request_bypass_insert(rq,
+				blk_mq_request_bypass_insert(rq, false,
 							list_empty(list));
 				break;
 			}

block/blk-mq.h

@@ -66,7 +66,8 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
  */
 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 				bool at_head);
-void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
+void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
+				  bool run_queue);
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 				struct list_head *list);