block: fix elvpriv allocation failure handling
Request allocation is mempool backed to guarantee forward progress under memory pressure; unfortunately, this property got broken while adding elvpriv data. Failures during elvpriv allocation, including ioc and icq creation failures, currently make get_request() fail as whole. There's no forward progress guarantee for these allocations - they may fail indefinitely under memory pressure stalling IO and deadlocking the system. This patch updates get_request() such that elvpriv allocation failure doesn't make the whole function fail. If elvpriv allocation fails, the allocation is degraded into !ELVPRIV. This will force the request to ELEVATOR_INSERT_BACK disturbing scheduling but elvpriv alloc failures should be rare (nothing is per-request) and anything is better than deadlocking. Signed-off-by: Tejun Heo <tj@kernel.org> Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent
29e2b09ab5
commit
aaf7c68068
|
@ -29,6 +29,7 @@
|
||||||
#include <linux/fault-inject.h>
|
#include <linux/fault-inject.h>
|
||||||
#include <linux/list_sort.h>
|
#include <linux/list_sort.h>
|
||||||
#include <linux/delay.h>
|
#include <linux/delay.h>
|
||||||
|
#include <linux/ratelimit.h>
|
||||||
|
|
||||||
#define CREATE_TRACE_POINTS
|
#define CREATE_TRACE_POINTS
|
||||||
#include <trace/events/block.h>
|
#include <trace/events/block.h>
|
||||||
|
@ -930,17 +931,6 @@ retry:
|
||||||
rw_flags |= REQ_IO_STAT;
|
rw_flags |= REQ_IO_STAT;
|
||||||
spin_unlock_irq(q->queue_lock);
|
spin_unlock_irq(q->queue_lock);
|
||||||
|
|
||||||
/* create icq if missing */
|
|
||||||
if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
|
|
||||||
create_io_context(gfp_mask, q->node);
|
|
||||||
ioc = rq_ioc(bio);
|
|
||||||
if (!ioc)
|
|
||||||
goto fail_alloc;
|
|
||||||
icq = ioc_create_icq(ioc, q, gfp_mask);
|
|
||||||
if (!icq)
|
|
||||||
goto fail_alloc;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* allocate and init request */
|
/* allocate and init request */
|
||||||
rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
|
rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
|
||||||
if (!rq)
|
if (!rq)
|
||||||
|
@ -949,17 +939,28 @@ retry:
|
||||||
blk_rq_init(q, rq);
|
blk_rq_init(q, rq);
|
||||||
rq->cmd_flags = rw_flags | REQ_ALLOCED;
|
rq->cmd_flags = rw_flags | REQ_ALLOCED;
|
||||||
|
|
||||||
|
/* init elvpriv */
|
||||||
if (rw_flags & REQ_ELVPRIV) {
|
if (rw_flags & REQ_ELVPRIV) {
|
||||||
rq->elv.icq = icq;
|
if (unlikely(et->icq_cache && !icq)) {
|
||||||
if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
|
create_io_context(gfp_mask, q->node);
|
||||||
mempool_free(rq, q->rq.rq_pool);
|
ioc = rq_ioc(bio);
|
||||||
goto fail_alloc;
|
if (!ioc)
|
||||||
|
goto fail_elvpriv;
|
||||||
|
|
||||||
|
icq = ioc_create_icq(ioc, q, gfp_mask);
|
||||||
|
if (!icq)
|
||||||
|
goto fail_elvpriv;
|
||||||
}
|
}
|
||||||
/* @rq->elv.icq holds on to io_context until @rq is freed */
|
|
||||||
|
rq->elv.icq = icq;
|
||||||
|
if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
|
||||||
|
goto fail_elvpriv;
|
||||||
|
|
||||||
|
/* @rq->elv.icq holds io_context until @rq is freed */
|
||||||
if (icq)
|
if (icq)
|
||||||
get_io_context(icq->ioc);
|
get_io_context(icq->ioc);
|
||||||
}
|
}
|
||||||
|
out:
|
||||||
/*
|
/*
|
||||||
* ioc may be NULL here, and ioc_batching will be false. That's
|
* ioc may be NULL here, and ioc_batching will be false. That's
|
||||||
* OK, if the queue is under the request limit then requests need
|
* OK, if the queue is under the request limit then requests need
|
||||||
|
@ -972,6 +973,24 @@ retry:
|
||||||
trace_block_getrq(q, bio, rw_flags & 1);
|
trace_block_getrq(q, bio, rw_flags & 1);
|
||||||
return rq;
|
return rq;
|
||||||
|
|
||||||
|
fail_elvpriv:
|
||||||
|
/*
|
||||||
|
* elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
|
||||||
|
* and may fail indefinitely under memory pressure and thus
|
||||||
|
* shouldn't stall IO. Treat this request as !elvpriv. This will
|
||||||
|
* disturb iosched and blkcg but weird is bettern than dead.
|
||||||
|
*/
|
||||||
|
printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n",
|
||||||
|
dev_name(q->backing_dev_info.dev));
|
||||||
|
|
||||||
|
rq->cmd_flags &= ~REQ_ELVPRIV;
|
||||||
|
rq->elv.icq = NULL;
|
||||||
|
|
||||||
|
spin_lock_irq(q->queue_lock);
|
||||||
|
rl->elvpriv--;
|
||||||
|
spin_unlock_irq(q->queue_lock);
|
||||||
|
goto out;
|
||||||
|
|
||||||
fail_alloc:
|
fail_alloc:
|
||||||
/*
|
/*
|
||||||
* Allocation failed presumably due to memory. Undo anything we
|
* Allocation failed presumably due to memory. Undo anything we
|
||||||
|
|
Loading…
Reference in New Issue