diff --git a/block/blk-mq.c b/block/blk-mq.c
index 961635b40999..b8738b3c6d06 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -632,8 +632,11 @@ static int blk_softirq_cpu_dead(unsigned int cpu)
 	return 0;
 }
 
-static void __blk_mq_complete_request(struct request *rq)
+
+static void __blk_mq_complete_request_remote(void *data)
 {
+	struct request *rq = data;
+
 	/*
 	 * For most of single queue controllers, there is only one irq vector
 	 * for handling I/O completion, and the only irq's affinity is set
@@ -649,11 +652,6 @@ static void __blk_mq_complete_request(struct request *rq)
 	rq->q->mq_ops->complete(rq);
 }
 
-static void __blk_mq_complete_request_remote(void *data)
-{
-	__blk_mq_complete_request(data);
-}
-
 static inline bool blk_mq_complete_need_ipi(struct request *rq)
 {
 	int cpu = raw_smp_processor_id();
@@ -672,6 +670,32 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
 	return cpu_online(rq->mq_ctx->cpu);
 }
 
+bool blk_mq_complete_request_remote(struct request *rq)
+{
+	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+
+	/*
+	 * For a polled request, always complete locally, it's pointless
+	 * to redirect the completion.
+	 */
+	if (rq->cmd_flags & REQ_HIPRI)
+		return false;
+
+	if (blk_mq_complete_need_ipi(rq)) {
+		rq->csd.func = __blk_mq_complete_request_remote;
+		rq->csd.info = rq;
+		rq->csd.flags = 0;
+		smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
+	} else {
+		if (rq->q->nr_hw_queues > 1)
+			return false;
+		blk_mq_trigger_softirq(rq);
+	}
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
+
 /**
  * blk_mq_complete_request - end I/O on a request
  * @rq:		the request being processed
@@ -681,25 +705,8 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
  **/
 void blk_mq_complete_request(struct request *rq)
 {
-	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
-
-	/*
-	 * For a polled request, always complete locallly, it's pointless
-	 * to redirect the completion.
-	 */
-	if (rq->cmd_flags & REQ_HIPRI) {
+	if (!blk_mq_complete_request_remote(rq))
 		rq->q->mq_ops->complete(rq);
-		return;
-	}
-
-	if (blk_mq_complete_need_ipi(rq)) {
-		rq->csd.func = __blk_mq_complete_request_remote;
-		rq->csd.info = rq;
-		rq->csd.flags = 0;
-		smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
-	} else {
-		__blk_mq_complete_request(rq);
-	}
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8e6ab766aef7..1641ec6cd7e5 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -504,6 +504,7 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 void blk_mq_complete_request(struct request *rq);
+bool blk_mq_complete_request_remote(struct request *rq);
 bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
 		struct bio *bio, unsigned int nr_segs);
 bool blk_mq_queue_stopped(struct request_queue *q);
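
Note for reviewers (not part of the patch): exporting blk_mq_complete_request_remote() lets a driver try the remote-completion path first and, when the request is not redirected to another CPU, complete it in the current context and skip the indirect mq_ops->complete call. A minimal sketch of a caller, where my_drv_end_request() and my_drv_complete_rq() are hypothetical driver functions used only for illustration:

	/*
	 * Illustration only.  blk_mq_complete_request_remote() returns true
	 * when it has taken over the completion (IPI or softirq redirect);
	 * on false the caller must finish the request itself, which lets it
	 * call its completion routine directly instead of going through the
	 * indirect mq_ops->complete pointer.
	 */
	static void my_drv_end_request(struct request *rq)
	{
		if (!blk_mq_complete_request_remote(rq))
			my_drv_complete_rq(rq);	/* same routine wired up as mq_ops->complete */
	}

This mirrors what blk_mq_complete_request() itself does after this change.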