1
0
Fork 0

block: only allow polling if a poll queue_map exists

This avoids having to have different mq_ops for different setups
with or without poll queues.

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
hifive-unleashed-5.1
Christoph Hellwig 2018-12-02 17:46:27 +01:00 committed by Jens Axboe
parent 529262d56d
commit 376f7ef8bf
2 changed files with 10 additions and 21 deletions

View File

@@ -402,7 +402,7 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
 	unsigned long poll_on;
 	ssize_t ret;
 
-	if (!q->mq_ops || !q->mq_ops->poll)
+	if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL)
 		return -EINVAL;
 
 	ret = queue_var_store(&poll_on, page, count);

View File

@@ -1602,22 +1602,15 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
 	.timeout	= nvme_timeout,
 };
 
-#define NVME_SHARED_MQ_OPS					\
-	.queue_rq		= nvme_queue_rq,		\
-	.commit_rqs		= nvme_commit_rqs,		\
-	.complete		= nvme_pci_complete_rq,		\
-	.init_hctx		= nvme_init_hctx,		\
-	.init_request		= nvme_init_request,		\
-	.map_queues		= nvme_pci_map_queues,		\
-	.timeout		= nvme_timeout			\
-
 static const struct blk_mq_ops nvme_mq_ops = {
-	NVME_SHARED_MQ_OPS,
-};
-
-static const struct blk_mq_ops nvme_mq_poll_ops = {
-	NVME_SHARED_MQ_OPS,
-	.poll			= nvme_poll,
+	.queue_rq	= nvme_queue_rq,
+	.complete	= nvme_pci_complete_rq,
+	.commit_rqs	= nvme_commit_rqs,
+	.init_hctx	= nvme_init_hctx,
+	.init_request	= nvme_init_request,
+	.map_queues	= nvme_pci_map_queues,
+	.timeout	= nvme_timeout,
+	.poll		= nvme_poll,
 };
static void nvme_dev_remove_admin(struct nvme_dev *dev)
@@ -2304,11 +2297,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	int ret;
 
 	if (!dev->ctrl.tagset) {
-		if (dev->io_queues[HCTX_TYPE_POLL])
-			dev->tagset.ops = &nvme_mq_poll_ops;
-		else
-			dev->tagset.ops = &nvme_mq_ops;
+		dev->tagset.ops = &nvme_mq_ops;
 		dev->tagset.nr_hw_queues = dev->online_queues - 1;
 		dev->tagset.nr_maps = HCTX_MAX_TYPES;
 		dev->tagset.timeout = NVME_IO_TIMEOUT;