
blk-mq: Allow PCI vector offset for mapping queues

The PCI interrupt vectors intended to be associated with a queue may
not start at 0; a driver may allocate pre_vectors for special use. This
patch adds an offset parameter so blk-mq may find the intended affinity
mask and updates all drivers using this API accordingly.

Cc: Don Brace <don.brace@microsemi.com>
Cc: <qla2xxx-upstream@qlogic.com>
Cc: <linux-scsi@vger.kernel.org>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Keith Busch 2018-03-27 09:39:06 -06:00 committed by Jens Axboe
parent 3148ffbdb9
commit f23f5bece6
5 changed files with 9 additions and 6 deletions
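
For context, here is a minimal sketch (not part of this commit) of how a driver that reserves pre_vectors would use the new offset argument. The foo_* names, the FOO_PRE_VECTORS value, and the vector layout are illustrative assumptions; only pci_alloc_irq_vectors_affinity() and the three-argument blk_mq_pci_map_queues() introduced by the diff below are real interfaces.

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>

/* Hypothetical device: vector 0 is reserved for admin/config interrupts. */
#define FOO_PRE_VECTORS	1

struct foo_dev {
	struct pci_dev *pdev;
};

static int foo_setup_irqs(struct foo_dev *fdev, unsigned int nr_queues)
{
	struct irq_affinity affd = {
		.pre_vectors = FOO_PRE_VECTORS,
	};

	/* Queue vectors are spread across CPUs after the reserved one(s). */
	return pci_alloc_irq_vectors_affinity(fdev->pdev,
					      FOO_PRE_VECTORS + 1,
					      FOO_PRE_VECTORS + nr_queues,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      &affd);
}

static int foo_map_queues(struct blk_mq_tag_set *set)
{
	struct foo_dev *fdev = set->driver_data;

	/* Skip the reserved pre_vectors when looking up per-queue affinity. */
	return blk_mq_pci_map_queues(set, fdev->pdev, FOO_PRE_VECTORS);
}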

block/blk-mq-pci.c

@@ -21,6 +21,7 @@
  * blk_mq_pci_map_queues - provide a default queue mapping for PCI device
  * @set: tagset to provide the mapping for
  * @pdev: PCI device associated with @set.
+ * @offset: Offset to use for the pci irq vector
  *
  * This function assumes the PCI device @pdev has at least as many available
  * interrupt vectors as @set has queues. It will then query the vector
@@ -28,13 +29,14 @@
  * that maps a queue to the CPUs that have irq affinity for the corresponding
  * vector.
  */
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev)
+int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+			  int offset)
 {
 	const struct cpumask *mask;
 	unsigned int queue, cpu;
 
 	for (queue = 0; queue < set->nr_hw_queues; queue++) {
-		mask = pci_irq_get_affinity(pdev, queue);
+		mask = pci_irq_get_affinity(pdev, queue + offset);
 		if (!mask)
 			goto fallback;
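
Putting the hunks above together, the updated helper reads roughly as follows. This is a reconstruction for illustration, not part of the diff; the fallback path (mapping every CPU to queue 0 when no affinity mask is available) is assumed unchanged from the existing code.

int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
			  int offset)
{
	const struct cpumask *mask;
	unsigned int queue, cpu;

	for (queue = 0; queue < set->nr_hw_queues; queue++) {
		/* Queue N uses PCI vector N + offset, skipping pre_vectors. */
		mask = pci_irq_get_affinity(pdev, queue + offset);
		if (!mask)
			goto fallback;

		/* Map every CPU in the vector's affinity mask to this queue. */
		for_each_cpu(cpu, mask)
			set->mq_map[cpu] = queue;
	}

	return 0;

fallback:
	/* No affinity information: map all CPUs to queue 0. */
	WARN_ON_ONCE(set->nr_hw_queues > 1);
	for_each_possible_cpu(cpu)
		set->mq_map[cpu] = 0;
	return 0;
}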

drivers/nvme/host/pci.c

@@ -414,7 +414,7 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
 {
 	struct nvme_dev *dev = set->driver_data;
 
-	return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev));
+	return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev), 0);
 }
 
 /**

drivers/scsi/qla2xxx/qla_os.c

@@ -6805,7 +6805,7 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost)
 	if (USER_CTRL_IRQ(vha->hw))
 		rc = blk_mq_map_queues(&shost->tag_set);
 	else
-		rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+		rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev, 0);
 	return rc;
 }

drivers/scsi/smartpqi/smartpqi_init.c

@@ -5348,7 +5348,7 @@ static int pqi_map_queues(struct Scsi_Host *shost)
 {
 	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
 
-	return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
+	return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev, 0);
 }
 
 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,

include/linux/blk-mq-pci.h

@@ -5,6 +5,7 @@
 struct blk_mq_tag_set;
 struct pci_dev;
 
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev);
+int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+			  int offset);
 
 #endif /* _LINUX_BLK_MQ_PCI_H */