1
0
Fork 0

blk-mq-rdma: pass in queue map to blk_mq_rdma_map_queues

Will be used by nvme-rdma for queue map separation support.

Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
hifive-unleashed-5.1
Sagi Grimberg 2018-12-11 23:38:54 -08:00 committed by Christoph Hellwig
parent 23454d59cc
commit e42b3867de
3 changed files with 6 additions and 6 deletions

View File

@ -29,24 +29,24 @@
 * @set->nr_hw_queues, or @dev does not provide an affinity mask for a
 * vector, we fallback to the naive mapping.
 */
int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set, int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
struct ib_device *dev, int first_vec) struct ib_device *dev, int first_vec)
{ {
const struct cpumask *mask; const struct cpumask *mask;
unsigned int queue, cpu; unsigned int queue, cpu;
for (queue = 0; queue < set->nr_hw_queues; queue++) { for (queue = 0; queue < map->nr_queues; queue++) {
mask = ib_get_vector_affinity(dev, first_vec + queue); mask = ib_get_vector_affinity(dev, first_vec + queue);
if (!mask) if (!mask)
goto fallback; goto fallback;
for_each_cpu(cpu, mask) for_each_cpu(cpu, mask)
set->map[0].mq_map[cpu] = queue; map->mq_map[cpu] = map->queue_offset + queue;
} }
return 0; return 0;
fallback: fallback:
return blk_mq_map_queues(&set->map[0]); return blk_mq_map_queues(map);
} }
EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues); EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);

View File

@ -1751,7 +1751,7 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
{ {
struct nvme_rdma_ctrl *ctrl = set->driver_data; struct nvme_rdma_ctrl *ctrl = set->driver_data;
return blk_mq_rdma_map_queues(set, ctrl->device->dev, 0); return blk_mq_rdma_map_queues(&set->map[0], ctrl->device->dev, 0);
} }
static const struct blk_mq_ops nvme_rdma_mq_ops = { static const struct blk_mq_ops nvme_rdma_mq_ops = {

View File

@ -4,7 +4,7 @@
struct blk_mq_tag_set;
struct ib_device;
/* Forward declaration: the prototype below takes a queue map directly,
 * so declare it here rather than pulling in all of blk-mq.h.
 * NOTE(review): confirm blk-mq.h is not already included above this
 * fragment — if it is, this forward declaration is harmless but redundant. */
struct blk_mq_queue_map;

int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
		struct ib_device *dev, int first_vec);

#endif /* _LINUX_BLK_MQ_RDMA_H */