
nvme-pci: align io queue count with allocated nvme_queue in nvme_probe

[ Upstream commit 2a5bcfdd41 ]

Since commit 147b27e4bd ("nvme-pci: allocate device queues storage
space at probe"), nvme_alloc_queue no longer allocates the nvme
queues itself.

If the write/poll_queues module parameters are changed at runtime to
values larger than the number of allocated queues in nvme_probe,
nvme_alloc_queue will access unallocated memory.

Add a new nr_allocated_queues member to struct nvme_dev to record how
many queues were allocated in nvme_probe, to avoid using more than the
allocated queues after a reset following a change to the
write/poll_queues module parameters.

Also add nr_write_queues and nr_poll_queues members to allow refreshing
the number of write and poll queues based on a change to the module
parameters when resetting the controller.
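
In outline, the reset path now samples the module parameters once and
clamps the requested I/O queue count to what nvme_probe allocated. A
standalone sketch of that arithmetic (illustrative values only;
num_possible_cpus() is stood in for by a constant, and this is not the
driver code itself):

  #include <stdio.h>

  static unsigned int write_queues = 2, poll_queues = 2; /* parameter values at probe time */
  static const unsigned int num_cpus = 4;                /* stand-in for num_possible_cpus() */

  int main(void)
  {
          /* probe: size the queue array once (+1 for the admin queue) */
          unsigned int nr_allocated_queues = num_cpus + write_queues + poll_queues + 1;

          /* module parameters raised at runtime, then the controller resets */
          write_queues = 8;
          poll_queues = 8;

          /* reset: sample the new values, then clamp to what was allocated */
          unsigned int wanted = num_cpus + write_queues + poll_queues;
          unsigned int nr_io_queues = wanted < nr_allocated_queues - 1 ?
                                      wanted : nr_allocated_queues - 1;

          printf("allocated %u queues, using %u io queues (requested %u)\n",
                 nr_allocated_queues, nr_io_queues, wanted);
          return 0;
  }

Without the clamp, the reset path would size its requests from the new,
larger parameter values while dev->queues still holds only the
originally allocated entries.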

Fixes: 147b27e4bd ("nvme-pci: allocate device queues storage space at probe")
Signed-off-by: Weiping Zhang <zhangweiping@didiglobal.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
[hch: add nvme_max_io_queues, update the commit message]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
5.4-rM2-2.2.x-imx-squashed
Weiping Zhang 2020-05-02 15:29:41 +08:00 committed by Greg Kroah-Hartman
parent be7f2672c3
commit a09fc96a3d
1 changed file with 33 additions and 24 deletions

drivers/nvme/host/pci.c

@@ -128,6 +128,9 @@ struct nvme_dev {
 	dma_addr_t host_mem_descs_dma;
 	struct nvme_host_mem_buf_desc *host_mem_descs;
 	void **host_mem_desc_bufs;
+	unsigned int nr_allocated_queues;
+	unsigned int nr_write_queues;
+	unsigned int nr_poll_queues;
 };
 
 static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
@@ -210,25 +213,14 @@ struct nvme_iod {
 	struct scatterlist *sg;
 };
 
-static unsigned int max_io_queues(void)
+static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
 {
-	return num_possible_cpus() + write_queues + poll_queues;
-}
-
-static unsigned int max_queue_count(void)
-{
-	/* IO queues + admin queue */
-	return 1 + max_io_queues();
-}
-
-static inline unsigned int nvme_dbbuf_size(u32 stride)
-{
-	return (max_queue_count() * 8 * stride);
+	return dev->nr_allocated_queues * 8 * dev->db_stride;
 }
 
 static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
 {
-	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);
+	unsigned int mem_size = nvme_dbbuf_size(dev);
 
 	if (dev->dbbuf_dbs)
 		return 0;
@@ -253,7 +245,7 @@ static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
 
 static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
 {
-	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);
+	unsigned int mem_size = nvme_dbbuf_size(dev);
 
 	if (dev->dbbuf_dbs) {
 		dma_free_coherent(dev->dev, mem_size,
@@ -2030,7 +2022,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
 static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
 {
 	struct nvme_dev *dev = affd->priv;
-	unsigned int nr_read_queues;
+	unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues;
 
 	/*
 	 * If there is no interupt available for queues, ensure that
@@ -2046,12 +2038,12 @@ static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
 	if (!nrirqs) {
 		nrirqs = 1;
 		nr_read_queues = 0;
-	} else if (nrirqs == 1 || !write_queues) {
+	} else if (nrirqs == 1 || !nr_write_queues) {
 		nr_read_queues = 0;
-	} else if (write_queues >= nrirqs) {
+	} else if (nr_write_queues >= nrirqs) {
 		nr_read_queues = 1;
 	} else {
-		nr_read_queues = nrirqs - write_queues;
+		nr_read_queues = nrirqs - nr_write_queues;
 	}
 
 	dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
@@ -2075,7 +2067,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 	 * Poll queues don't need interrupts, but we need at least one IO
 	 * queue left over for non-polled IO.
 	 */
-	this_p_queues = poll_queues;
+	this_p_queues = dev->nr_poll_queues;
 	if (this_p_queues >= nr_io_queues) {
 		this_p_queues = nr_io_queues - 1;
 		irq_queues = 1;
@@ -2105,14 +2097,25 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
 		__nvme_disable_io_queues(dev, nvme_admin_delete_cq);
 }
 
+static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
+{
+	return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues;
+}
+
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
 	struct nvme_queue *adminq = &dev->queues[0];
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
-	int result, nr_io_queues;
+	unsigned int nr_io_queues;
 	unsigned long size;
+	int result;
 
-	nr_io_queues = max_io_queues();
+	/*
+	 * Sample the module parameters once at reset time so that we have
+	 * stable values to work with.
+	 */
+	dev->nr_write_queues = write_queues;
+	dev->nr_poll_queues = poll_queues;
 
 	/*
 	 * If tags are shared with admin queue (Apple bug), then
@@ -2120,6 +2123,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	 */
 	if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
 		nr_io_queues = 1;
+	else
+		nr_io_queues = min(nvme_max_io_queues(dev),
+				   dev->nr_allocated_queues - 1);
 
 	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
 	if (result < 0)
@@ -2794,8 +2800,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (!dev)
 		return -ENOMEM;
 
-	dev->queues = kcalloc_node(max_queue_count(), sizeof(struct nvme_queue),
-					GFP_KERNEL, node);
+	dev->nr_write_queues = write_queues;
+	dev->nr_poll_queues = poll_queues;
+	dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1;
+	dev->queues = kcalloc_node(dev->nr_allocated_queues,
+			sizeof(struct nvme_queue), GFP_KERNEL, node);
 	if (!dev->queues)
 		goto free;